author | Michael Ellerman <mpe@ellerman.id.au> | 2021-05-08 14:12:55 +0300
committer | Michael Ellerman <mpe@ellerman.id.au> | 2021-05-08 14:12:55 +0300
commit | f96271cefe6dfd1cb04195b76f4a33e185cd7f92 (patch)
tree | f290b3e7aa9c12fdc4853ce97c150c4f9abd90a3 /arch
parent | 32b48bf8514c28cdc89cd8069eceeb6e6cff0612 (diff)
parent | dd860052c99b1e088352bdd4fb7aef46f8d2ef47 (diff)
download | linux-f96271cefe6dfd1cb04195b76f4a33e185cd7f92.tar.xz
Merge branch 'master' into next
Merge master back into next; this allows us to resolve some conflicts in
arch/powerpc/Kconfig, and also to re-sort the symbols under config PPC so
that they are in alphabetical order again.
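As a quick aside on the re-sort mentioned above, the ordering of the select entries under config PPC can be spot-checked with standard tools. The snippet below is only an illustrative sketch, not part of the commit; the path and symbol name are taken from the message above, and sort -c simply reports the first out-of-order entry, if any:

    # Print the symbols selected in the 'config PPC' block and verify they are sorted.
    awk '/^config PPC$/ { in_ppc = 1; next }
         in_ppc && /^config /     { in_ppc = 0 }
         in_ppc && $1 == "select" { print $2 }' arch/powerpc/Kconfig | sort -c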
Diffstat (limited to 'arch')
240 files changed, 3722 insertions, 1751 deletions
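For reference, a stat like the one above can be regenerated locally from the hashes listed in the header. This is a sketch that assumes the diff is taken against the first parent and limited to arch/, matching how the summary is presented here:

    git diff --stat 32b48bf8514c..f96271cefe6d -- arch/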
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index 1f6a909d1fa5..0fab5ac90775 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h @@ -602,11 +602,6 @@ extern void outsl (unsigned long port, const void *src, unsigned long count); */ #define xlate_dev_mem_ptr(p) __va(p) -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - #endif /* __KERNEL__ */ #endif /* __ALPHA_IO_H */ diff --git a/arch/alpha/kernel/pc873xx.c b/arch/alpha/kernel/pc873xx.c index 63aee5d86e02..82b19c9e59d6 100644 --- a/arch/alpha/kernel/pc873xx.c +++ b/arch/alpha/kernel/pc873xx.c @@ -13,12 +13,12 @@ static char *pc873xx_names[] = { static unsigned int base, model; -unsigned int __init pc873xx_get_base() +unsigned int __init pc873xx_get_base(void) { return base; } -char *__init pc873xx_get_model() +char *__init pc873xx_get_model(void) { return pc873xx_names[model]; } diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c index dc68efbe9367..1931a04af85a 100644 --- a/arch/alpha/lib/csum_partial_copy.c +++ b/arch/alpha/lib/csum_partial_copy.c @@ -13,6 +13,7 @@ #include <linux/types.h> #include <linux/string.h> #include <linux/uaccess.h> +#include <net/checksum.h> #define ldq_u(x,y) \ diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index bc8d6aecfbbd..2d98501c0897 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -6,6 +6,7 @@ config ARC def_bool y select ARC_TIMERS + select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_PTE_SPECIAL @@ -28,6 +29,7 @@ config ARC select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4 select HAVE_DEBUG_STACKOVERFLOW select HAVE_DEBUG_KMEMLEAK select HAVE_FUTEX_CMPXCHG if FUTEX @@ -48,9 +50,6 @@ config ARC select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32 select SET_FS -config ARCH_HAS_CACHE_LINE_SIZE - def_bool y - config TRACE_IRQFLAGS_SUPPORT def_bool y @@ -86,10 +85,6 @@ config STACKTRACE_SUPPORT def_bool y select STACKTRACE -config HAVE_ARCH_TRANSPARENT_HUGEPAGE - def_bool y - depends on ARC_MMU_V4 - menu "ARC Architecture Configuration" menu "ARC Platform/SoC/Board" diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 085c830d344b..24804f11302d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -31,6 +31,7 @@ config ARM select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7 select ARCH_SUPPORTS_ATOMIC_RMW + select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF select ARCH_USE_MEMTEST @@ -77,6 +78,7 @@ config ARM select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT select HAVE_ARCH_THREAD_STRUCT_WHITELIST select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARM_LPAE select HAVE_ARM_SMCCC if CPU_V7 select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32 select HAVE_CONTEXT_TRACKING @@ -1511,14 +1513,6 @@ config HW_PERF_EVENTS def_bool y depends on ARM_PMU -config SYS_SUPPORTS_HUGETLBFS - def_bool y - depends on ARM_LPAE - -config HAVE_ARCH_TRANSPARENT_HUGEPAGE - def_bool y - depends on ARM_LPAE - config ARCH_WANT_GENERAL_HUGETLB def_bool y diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index fd94e27ba4fa..c1f804768621 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -118,8 +118,8 @@ asflags-y := -DZIMAGE # Supply kernel 
BSS size to the decompressor via a linker symbol. KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \ - sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \ - -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) ) + sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \ + -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) ) LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ) # Supply ZRELADDR to the decompressor via a linker symbol. ifneq ($(CONFIG_AUTO_ZRELADDR),y) diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi index 47a787a12e55..e24230d50a78 100644 --- a/arch/arm/boot/dts/rk3036.dtsi +++ b/arch/arm/boot/dts/rk3036.dtsi @@ -355,7 +355,6 @@ reg = <0x20050000 0x10>; #pwm-cells = <3>; clocks = <&cru PCLK_PWM>; - clock-names = "pwm"; pinctrl-names = "default"; pinctrl-0 = <&pwm0_pin>; status = "disabled"; @@ -366,7 +365,6 @@ reg = <0x20050010 0x10>; #pwm-cells = <3>; clocks = <&cru PCLK_PWM>; - clock-names = "pwm"; pinctrl-names = "default"; pinctrl-0 = <&pwm1_pin>; status = "disabled"; @@ -377,7 +375,6 @@ reg = <0x20050020 0x10>; #pwm-cells = <3>; clocks = <&cru PCLK_PWM>; - clock-names = "pwm"; pinctrl-names = "default"; pinctrl-0 = <&pwm2_pin>; status = "disabled"; @@ -388,7 +385,6 @@ reg = <0x20050030 0x10>; #pwm-cells = <2>; clocks = <&cru PCLK_PWM>; - clock-names = "pwm"; pinctrl-names = "default"; pinctrl-0 = <&pwm3_pin>; status = "disabled"; diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index ea7416c31f9b..05557ad02b33 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -679,7 +679,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm0_pin>; clocks = <&cru PCLK_RKPWM>; - clock-names = "pwm"; status = "disabled"; }; @@ -690,7 +689,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm1_pin>; clocks = <&cru PCLK_RKPWM>; - clock-names = "pwm"; status = "disabled"; }; @@ -701,7 +699,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm2_pin>; clocks = <&cru PCLK_RKPWM>; - clock-names = "pwm"; status = "disabled"; }; @@ -712,7 +709,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm3_pin>; clocks = <&cru PCLK_RKPWM>; - clock-names = "pwm"; status = "disabled"; }; diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig index e70c997d5f4c..b935162a8bba 100644 --- a/arch/arm/configs/dove_defconfig +++ b/arch/arm/configs/dove_defconfig @@ -63,7 +63,6 @@ CONFIG_INPUT_EVDEV=y # CONFIG_MOUSE_PS2 is not set # CONFIG_SERIO is not set CONFIG_LEGACY_PTY_COUNT=16 -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_RUNTIME_UARTS=2 diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig index 3a7938f244e5..2aa3ebeb89d7 100644 --- a/arch/arm/configs/footbridge_defconfig +++ b/arch/arm/configs/footbridge_defconfig @@ -7,7 +7,6 @@ CONFIG_EXPERT=y CONFIG_MODULES=y CONFIG_ARCH_FOOTBRIDGE=y CONFIG_ARCH_CATS=y -CONFIG_ARCH_PERSONAL_SERVER=y CONFIG_ARCH_EBSA285_HOST=y CONFIG_ARCH_NETWINDER=y CONFIG_LEDS=y diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig index b4670d42f378..abde1fb23b20 100644 --- a/arch/arm/configs/magician_defconfig +++ b/arch/arm/configs/magician_defconfig @@ -72,7 +72,6 @@ CONFIG_INPUT_TOUCHSCREEN=y CONFIG_INPUT_MISC=y CONFIG_INPUT_UINPUT=m # CONFIG_SERIO is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_PXA=y # CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set diff --git a/arch/arm/configs/moxart_defconfig b/arch/arm/configs/moxart_defconfig index 
6834e97af348..eacc089d86c5 100644 --- a/arch/arm/configs/moxart_defconfig +++ b/arch/arm/configs/moxart_defconfig @@ -79,7 +79,6 @@ CONFIG_INPUT_EVBUG=y # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=1 diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig index 1d923dbb9928..89f4a6ff30bd 100644 --- a/arch/arm/configs/mps2_defconfig +++ b/arch/arm/configs/mps2_defconfig @@ -69,7 +69,6 @@ CONFIG_SMSC911X=y # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_NONSTANDARD=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_MPS2_UART_CONSOLE=y CONFIG_SERIAL_MPS2_UART=y # CONFIG_HW_RANDOM is not set diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig index 4f16716bfc32..d57ff30dabff 100644 --- a/arch/arm/configs/mvebu_v5_defconfig +++ b/arch/arm/configs/mvebu_v5_defconfig @@ -100,7 +100,6 @@ CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set CONFIG_LEGACY_PTY_COUNT=16 -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_RUNTIME_UARTS=2 diff --git a/arch/arm/configs/xcep_defconfig b/arch/arm/configs/xcep_defconfig index f1fbdfc5c8c6..4d8e7f2eaef7 100644 --- a/arch/arm/configs/xcep_defconfig +++ b/arch/arm/configs/xcep_defconfig @@ -53,7 +53,6 @@ CONFIG_NET_ETHERNET=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_PXA=y CONFIG_SERIAL_PXA_CONSOLE=y # CONFIG_LEGACY_PTYS is not set diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index 673c7dd75ab9..ba8d9d7d242b 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h @@ -88,5 +88,6 @@ extern asmlinkage void c_backtrace(unsigned long fp, int pmode, struct mm_struct; void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr); extern void __show_regs(struct pt_regs *); +extern void __show_regs_alloc_free(struct pt_regs *regs); #endif diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index fc748122f1e0..f74944c6fe8d 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -430,11 +430,6 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr); */ #define xlate_dev_mem_ptr(p) __va(p) -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - #include <asm-generic/io.h> #ifdef CONFIG_MMU diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h index 22751b5b5735..e62832dcba76 100644 --- a/arch/arm/include/asm/kexec.h +++ b/arch/arm/include/asm/kexec.h @@ -56,9 +56,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs, } } -/* Function pointer to optional machine-specific reinitialization */ -extern void (*kexec_reinit)(void); - static inline unsigned long phys_to_boot_phys(phys_addr_t phys) { return phys_to_idmap(phys); diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 2f841cb65c30..a711322d9f40 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -150,21 +150,6 @@ extern unsigned long vectors_base; */ #define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) -#ifdef CONFIG_XIP_KERNEL -/* - * When referencing data in RAM from the XIP region in a relative manner - * with the MMU off, we need the relative offset between the two physical - * addresses. 
The macro below achieves this, which is: - * __pa(v_data) - __xip_pa(v_text) - */ -#define PHYS_RELATIVE(v_data, v_text) \ - (((v_data) - PAGE_OFFSET + PLAT_PHYS_OFFSET) - \ - ((v_text) - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + \ - CONFIG_XIP_PHYS_ADDR)) -#else -#define PHYS_RELATIVE(v_data, v_text) ((v_data) - (v_text)) -#endif - #ifndef __ASSEMBLY__ /* diff --git a/arch/arm/include/asm/set_memory.h b/arch/arm/include/asm/set_memory.h index a1ceff4295d3..ec17fc0fda7a 100644 --- a/arch/arm/include/asm/set_memory.h +++ b/arch/arm/include/asm/set_memory.h @@ -18,12 +18,4 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } #endif -#ifdef CONFIG_STRICT_KERNEL_RWX -void set_kernel_text_rw(void); -void set_kernel_text_ro(void); -#else -static inline void set_kernel_text_rw(void) { } -static inline void set_kernel_text_ro(void) { } -#endif - #endif diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild index ce8573157774..63748af8bc9d 100644 --- a/arch/arm/include/uapi/asm/Kbuild +++ b/arch/arm/include/uapi/asm/Kbuild @@ -1,6 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -generated-y += unistd-common.h generated-y += unistd-oabi.h generated-y += unistd-eabi.h generic-y += kvm_para.h diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index 93ecf8aa4fe5..ae7749e15726 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -24,7 +24,6 @@ #include <asm/unistd-oabi.h> #endif -#include <asm/unistd-common.h> #define __NR_sync_file_range2 __NR_arm_sync_file_range /* diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index be8050b0c3df..70993af22d80 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -24,6 +24,7 @@ #include <asm/vdso_datapage.h> #include <asm/hardware/cache-l2x0.h> #include <linux/kbuild.h> +#include <linux/arm-smccc.h> #include "signal.h" /* @@ -148,6 +149,8 @@ int main(void) DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys)); DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash)); #endif + DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id)); + DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state)); BLANK(); DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index e0d7833a1827..7f0b7aba1498 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -344,20 +344,19 @@ ENTRY(\sym) .size \sym, . - \sym .endm -#define NATIVE(nr, func) syscall nr, func +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#define __SYSCALL(nr, func) syscall nr, func /* * This is the syscall table declaration for native ABI syscalls. * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall. */ syscall_table_start sys_call_table -#define COMPAT(nr, native, compat) syscall nr, native #ifdef CONFIG_AEABI #include <calls-eabi.S> #else #include <calls-oabi.S> #endif -#undef COMPAT syscall_table_end sys_call_table /*============================================================================ @@ -455,7 +454,8 @@ ENDPROC(sys_oabi_readahead) * using the compatibility syscall entries. 
*/ syscall_table_start sys_oabi_call_table -#define COMPAT(nr, native, compat) syscall nr, compat +#undef __SYSCALL_WITH_COMPAT +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) #include <calls-oabi.S> syscall_table_end sys_oabi_call_table diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 08660ae9dcbc..b1423fb130ea 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs) info->trigger = addr; pr_debug("breakpoint fired: address = 0x%x\n", addr); perf_bp_event(bp, regs); - if (!bp->overflow_handler) + if (is_default_overflow_handler(bp)) enable_single_step(bp, addr); goto unlock; } diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 2b09dad7935e..f567032a09c0 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -147,11 +147,6 @@ void machine_crash_shutdown(struct pt_regs *regs) pr_info("Loading crashdump kernel...\n"); } -/* - * Function pointer to optional machine-specific reinitialization - */ -void (*kexec_reinit)(void); - void machine_kexec(struct kimage *image) { unsigned long page_list, reboot_entry_phys; @@ -187,9 +182,6 @@ void machine_kexec(struct kimage *image) pr_info("Bye!\n"); - if (kexec_reinit) - kexec_reinit(); - soft_restart(reboot_entry_phys); } diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 5199a2bb4111..6324f4db9b02 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -92,6 +92,17 @@ void arch_cpu_idle_exit(void) ledtrig_cpu(CPU_LED_IDLE_END); } +void __show_regs_alloc_free(struct pt_regs *regs) +{ + int i; + + /* check for r0 - r12 only */ + for (i = 0; i < 13; i++) { + pr_alert("Register r%d information:", i); + mem_dump_obj((void *)regs->uregs[i]); + } +} + void __show_regs(struct pt_regs *regs) { unsigned long flags; diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S index 00664c78faca..931df62a7831 100644 --- a/arch/arm/kernel/smccc-call.S +++ b/arch/arm/kernel/smccc-call.S @@ -3,7 +3,9 @@ * Copyright (c) 2015, Linaro Limited */ #include <linux/linkage.h> +#include <linux/arm-smccc.h> +#include <asm/asm-offsets.h> #include <asm/opcodes-sec.h> #include <asm/opcodes-virt.h> #include <asm/unwind.h> @@ -27,7 +29,14 @@ UNWIND( .fnstart) UNWIND( .save {r4-r7}) ldm r12, {r4-r7} \instr - pop {r4-r7} + ldr r4, [sp, #36] + cmp r4, #0 + beq 1f // No quirk structure + ldr r5, [r4, #ARM_SMCCC_QUIRK_ID_OFFS] + cmp r5, #ARM_SMCCC_QUIRK_QCOM_A6 + bne 1f // No quirk present + str r6, [r4, #ARM_SMCCC_QUIRK_STATE_OFFS] +1: pop {r4-r7} ldr r12, [sp, #(4 * 4)] stm r12, {r0-r3} bx lr diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index 24bd20564be7..43f0a3ebf390 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/ftrace.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mm_types.h> @@ -26,12 +27,22 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) return -EINVAL; /* + * Function graph tracer state gets incosistent when the kernel + * calls functions that never return (aka suspend finishers) hence + * disable graph tracing during their execution. + */ + pause_graph_tracing(); + + /* * Provide a temporary page table with an identity mapping for * the MMU-enable code, required for resuming. 
On successful * resume (indicated by a zero return code), we need to switch * back to the correct page tables. */ ret = __cpu_suspend(arg, fn, __mpidr); + + unpause_graph_tracing(); + if (ret == 0) { cpu_switch_mm(mm->pgd, mm); local_flush_bp_all(); @@ -45,7 +56,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) { u32 __mpidr = cpu_logical_map(smp_processor_id()); - return __cpu_suspend(arg, fn, __mpidr); + int ret; + + pause_graph_tracing(); + ret = __cpu_suspend(arg, fn, __mpidr); + unpause_graph_tracing(); + + return ret; } #define idmap_pgd NULL #endif diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 17d5a785df28..64308e3a5d0c 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -287,6 +287,7 @@ static int __die(const char *str, int err, struct pt_regs *regs) print_modules(); __show_regs(regs); + __show_regs_alloc_free(regs); pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk)); diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig index 844aa585b966..728aff93fba9 100644 --- a/arch/arm/mach-footbridge/Kconfig +++ b/arch/arm/mach-footbridge/Kconfig @@ -16,27 +16,6 @@ config ARCH_CATS Saying N will reduce the size of the Footbridge kernel. -config ARCH_PERSONAL_SERVER - bool "Compaq Personal Server" - select FOOTBRIDGE_HOST - select ISA - select ISA_DMA - select FORCE_PCI - help - Say Y here if you intend to run this kernel on the Compaq - Personal Server. - - Saying N will reduce the size of the Footbridge kernel. - - The Compaq Personal Server is not available for purchase. - There are no product plans beyond the current research - prototypes at this time. Information is available at: - - <http://www.crl.hpl.hp.com/projects/personalserver/> - - If you have any questions or comments about the Compaq Personal - Server, send e-mail to <skiff@crl.dec.com>. - config ARCH_EBSA285_ADDIN bool "EBSA285 (addin mode)" select ARCH_EBSA285 diff --git a/arch/arm/mach-footbridge/Makefile b/arch/arm/mach-footbridge/Makefile index a09f1041f141..6262993c0555 100644 --- a/arch/arm/mach-footbridge/Makefile +++ b/arch/arm/mach-footbridge/Makefile @@ -11,12 +11,10 @@ pci-y += dc21285.o pci-$(CONFIG_ARCH_CATS) += cats-pci.o pci-$(CONFIG_ARCH_EBSA285_HOST) += ebsa285-pci.o pci-$(CONFIG_ARCH_NETWINDER) += netwinder-pci.o -pci-$(CONFIG_ARCH_PERSONAL_SERVER) += personal-pci.o obj-$(CONFIG_ARCH_CATS) += cats-hw.o isa-timer.o obj-$(CONFIG_ARCH_EBSA285) += ebsa285.o dc21285-timer.o obj-$(CONFIG_ARCH_NETWINDER) += netwinder-hw.o isa-timer.o -obj-$(CONFIG_ARCH_PERSONAL_SERVER) += personal.o dc21285-timer.o obj-$(CONFIG_PCI) +=$(pci-y) diff --git a/arch/arm/mach-footbridge/personal-pci.c b/arch/arm/mach-footbridge/personal-pci.c deleted file mode 100644 index 9d19aa98a663..000000000000 --- a/arch/arm/mach-footbridge/personal-pci.c +++ /dev/null @@ -1,57 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * linux/arch/arm/mach-footbridge/personal-pci.c - * - * PCI bios-type initialisation for PCI machines - * - * Bits taken from various places. 
- */ -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/init.h> - -#include <asm/irq.h> -#include <asm/mach/pci.h> -#include <asm/mach-types.h> - -static int irqmap_personal_server[] = { - IRQ_IN0, IRQ_IN1, IRQ_IN2, IRQ_IN3, 0, 0, 0, - IRQ_DOORBELLHOST, IRQ_DMA1, IRQ_DMA2, IRQ_PCI -}; - -static int personal_server_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) -{ - unsigned char line; - - pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line); - - if (line > 0x40 && line <= 0x5f) { - /* line corresponds to the bit controlling this interrupt - * in the footbridge. Ignore the first 8 interrupt bits, - * look up the rest in the map. IN0 is bit number 8 - */ - return irqmap_personal_server[(line & 0x1f) - 8]; - } else if (line == 0) { - /* no interrupt */ - return 0; - } else - return irqmap_personal_server[(line - 1) & 3]; -} - -static struct hw_pci personal_server_pci __initdata = { - .map_irq = personal_server_map_irq, - .nr_controllers = 1, - .ops = &dc21285_ops, - .setup = dc21285_setup, - .preinit = dc21285_preinit, - .postinit = dc21285_postinit, -}; - -static int __init personal_pci_init(void) -{ - if (machine_is_personal_server()) - pci_common_init(&personal_server_pci); - return 0; -} - -subsys_initcall(personal_pci_init); diff --git a/arch/arm/mach-footbridge/personal.c b/arch/arm/mach-footbridge/personal.c deleted file mode 100644 index ca715754fc00..000000000000 --- a/arch/arm/mach-footbridge/personal.c +++ /dev/null @@ -1,25 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * linux/arch/arm/mach-footbridge/personal.c - * - * Personal server (Skiff) machine fixup - */ -#include <linux/init.h> -#include <linux/spinlock.h> - -#include <asm/hardware/dec21285.h> -#include <asm/mach-types.h> - -#include <asm/mach/arch.h> - -#include "common.h" - -MACHINE_START(PERSONAL_SERVER, "Compaq-PersonalServer") - /* Maintainer: Jamey Hicks / George France */ - .atag_offset = 0x100, - .map_io = footbridge_map_io, - .init_irq = footbridge_init_irq, - .init_time = footbridge_timer_init, - .restart = footbridge_restart, -MACHINE_END - diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c index 78b9a5ee41c9..bf99e718f8b8 100644 --- a/arch/arm/mach-iop32x/n2100.c +++ b/arch/arm/mach-iop32x/n2100.c @@ -116,16 +116,16 @@ static struct hw_pci n2100_pci __initdata = { }; /* - * Both r8169 chips on the n2100 exhibit PCI parity problems. Set - * the ->broken_parity_status flag for both ports so that the r8169 - * driver knows it should ignore error interrupts. + * Both r8169 chips on the n2100 exhibit PCI parity problems. Turn + * off parity reporting for both ports so we don't get error interrupts + * for them. */ static void n2100_fixup_r8169(struct pci_dev *dev) { if (dev->bus->number == 0 && (dev->devfn == PCI_DEVFN(1, 0) || dev->devfn == PCI_DEVFN(2, 0))) - dev->broken_parity_status = 1; + pci_disable_parity(dev); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, PCI_ANY_ID, n2100_fixup_r8169); diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index dc8f152f3556..830bbfb26ca5 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -33,41 +33,41 @@ icache_size: * processor. We fix this by performing an invalidate, rather than a * clean + invalidate, before jumping into the kernel. * - * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs - * to be called for both secondary cores startup and primary core resume - * procedures. 
+ * This function needs to be called for both secondary cores startup and + * primary core resume procedures. */ ENTRY(v7_invalidate_l1) - mov r0, #0 - mcr p15, 2, r0, c0, c0, 0 - mrc p15, 1, r0, c0, c0, 0 - - movw r1, #0x7fff - and r2, r1, r0, lsr #13 + mov r0, #0 + mcr p15, 2, r0, c0, c0, 0 @ select L1 data cache in CSSELR + isb + mrc p15, 1, r0, c0, c0, 0 @ read cache geometry from CCSIDR - movw r1, #0x3ff + movw r3, #0x3ff + and r3, r3, r0, lsr #3 @ 'Associativity' in CCSIDR[12:3] + clz r1, r3 @ WayShift + mov r2, #1 + mov r3, r3, lsl r1 @ NumWays-1 shifted into bits [31:...] + movs r1, r2, lsl r1 @ #1 shifted left by same amount + moveq r1, #1 @ r1 needs value > 0 even if only 1 way - and r3, r1, r0, lsr #3 @ NumWays - 1 - add r2, r2, #1 @ NumSets + and r2, r0, #0x7 + add r2, r2, #4 @ SetShift - and r0, r0, #0x7 - add r0, r0, #4 @ SetShift +1: movw ip, #0x7fff + and r0, ip, r0, lsr #13 @ 'NumSets' in CCSIDR[27:13] - clz r1, r3 @ WayShift - add r4, r3, #1 @ NumWays -1: sub r2, r2, #1 @ NumSets-- - mov r3, r4 @ Temp = NumWays -2: subs r3, r3, #1 @ Temp-- - mov r5, r3, lsl r1 - mov r6, r2, lsl r0 - orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) - mcr p15, 0, r5, c7, c6, 2 - bgt 2b - cmp r2, #0 - bgt 1b - dsb st - isb - ret lr +2: mov ip, r0, lsl r2 @ NumSet << SetShift + orr ip, ip, r3 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) + mcr p15, 0, ip, c7, c6, 2 + subs r0, r0, #1 @ Set-- + bpl 2b + subs r3, r3, r1 @ Way-- + bcc 3f + mrc p15, 1, r0, c0, c0, 0 @ re-read cache geometry from CCSIDR + b 1b +3: dsb st + isb + ret lr ENDPROC(v7_invalidate_l1) /* diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index 93ff0097f00b..fb688003d156 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -420,7 +420,7 @@ void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info) note_page(&st, 0, 0, 0, NULL); } -static void ptdump_initialize(void) +static void __init ptdump_initialize(void) { unsigned i, j; @@ -466,7 +466,7 @@ void ptdump_check_wx(void) pr_info("Checked W+X mappings: passed, no W+X pages found\n"); } -static int ptdump_init(void) +static int __init ptdump_init(void) { ptdump_initialize(); ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables"); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 7022b7b5c400..9d4744a632c6 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -301,7 +301,11 @@ static void __init free_highpages(void) void __init mem_init(void) { #ifdef CONFIG_ARM_LPAE - swiotlb_init(1); + if (swiotlb_force == SWIOTLB_FORCE || + max_pfn > arm_dma_pfn_limit) + swiotlb_init(1); + else + swiotlb_force = SWIOTLB_NO_FORCE; #endif set_max_mapnr(pfn_to_page(max_pfn) - mem_map); @@ -485,33 +489,12 @@ static int __mark_rodata_ro(void *unused) return 0; } -static int kernel_set_to_readonly __read_mostly; - void mark_rodata_ro(void) { - kernel_set_to_readonly = 1; stop_machine(__mark_rodata_ro, NULL, NULL); debug_checkwx(); } -void set_kernel_text_rw(void) -{ - if (!kernel_set_to_readonly) - return; - - set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false, - current->active_mm); -} - -void set_kernel_text_ro(void) -{ - if (!kernel_set_to_readonly) - return; - - set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true, - current->active_mm); -} - #else static inline void fix_kernmem_perms(void) { } #endif /* CONFIG_STRICT_KERNEL_RWX */ diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 28c9d32fa99a..26d726a08a34 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -256,6 +256,20 @@ 
ENDPROC(cpu_pj4b_do_resume) #endif + @ + @ Invoke the v7_invalidate_l1() function, which adheres to the AAPCS + @ rules, and so it may corrupt registers that we need to preserve. + @ + .macro do_invalidate_l1 + mov r6, r1 + mov r7, r2 + mov r10, lr + bl v7_invalidate_l1 @ corrupts {r0-r3, ip, lr} + mov r1, r6 + mov r2, r7 + mov lr, r10 + .endm + /* * __v7_setup * @@ -277,6 +291,7 @@ __v7_ca5mp_setup: __v7_ca9mp_setup: __v7_cr7mp_setup: __v7_cr8mp_setup: + do_invalidate_l1 mov r10, #(1 << 0) @ Cache/TLB ops broadcasting b 1f __v7_ca7mp_setup: @@ -284,13 +299,9 @@ __v7_ca12mp_setup: __v7_ca15mp_setup: __v7_b15mp_setup: __v7_ca17mp_setup: + do_invalidate_l1 mov r10, #0 -1: adr r0, __v7_setup_stack_ptr - ldr r12, [r0] - add r12, r12, r0 @ the local stack - stmia r12, {r1-r6, lr} @ v7_invalidate_l1 touches r0-r6 - bl v7_invalidate_l1 - ldmia r12, {r1-r6, lr} +1: #ifdef CONFIG_SMP orr r10, r10, #(1 << 6) @ Enable SMP/nAMP mode ALT_SMP(mrc p15, 0, r0, c1, c0, 1) @@ -471,12 +482,7 @@ __v7_pj4b_setup: #endif /* CONFIG_CPU_PJ4B */ __v7_setup: - adr r0, __v7_setup_stack_ptr - ldr r12, [r0] - add r12, r12, r0 @ the local stack - stmia r12, {r1-r6, lr} @ v7_invalidate_l1 touches r0-r6 - bl v7_invalidate_l1 - ldmia r12, {r1-r6, lr} + do_invalidate_l1 __v7_setup_cont: and r0, r9, #0xff000000 @ ARM? @@ -548,17 +554,8 @@ __errata_finish: orr r0, r0, r6 @ set them THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions ret lr @ return to head.S:__ret - - .align 2 -__v7_setup_stack_ptr: - .word PHYS_RELATIVE(__v7_setup_stack, .) ENDPROC(__v7_setup) - .bss - .align 2 -__v7_setup_stack: - .space 4 * 7 @ 7 registers - __INITDATA .weak cpu_v7_bugs_init diff --git a/arch/arm/mm/ptdump_debugfs.c b/arch/arm/mm/ptdump_debugfs.c index 598b636615a2..318de969ae0f 100644 --- a/arch/arm/mm/ptdump_debugfs.c +++ b/arch/arm/mm/ptdump_debugfs.c @@ -11,20 +11,9 @@ static int ptdump_show(struct seq_file *m, void *v) ptdump_walk_pgd(m, info); return 0; } +DEFINE_SHOW_ATTRIBUTE(ptdump); -static int ptdump_open(struct inode *inode, struct file *file) -{ - return single_open(file, ptdump_show, inode->i_private); -} - -static const struct file_operations ptdump_fops = { - .open = ptdump_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -void ptdump_debugfs_register(struct ptdump_info *info, const char *name) +void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name) { debugfs_create_file(name, 0400, NULL, info, &ptdump_fops); } diff --git a/arch/arm/probes/kprobes/test-arm.c b/arch/arm/probes/kprobes/test-arm.c index 977369f1aa48..a0dae35ffacd 100644 --- a/arch/arm/probes/kprobes/test-arm.c +++ b/arch/arm/probes/kprobes/test-arm.c @@ -55,25 +55,25 @@ void kprobe_arm_test_cases(void) TEST_GROUP("Data-processing (register), (register-shifted register), (immediate)") #define _DATA_PROCESSING_DNM(op,s,val) \ - TEST_RR( op "eq" s " r0, r",1, VAL1,", r",2, val, "") \ - TEST_RR( op "ne" s " r1, r",1, VAL1,", r",2, val, ", lsl #3") \ - TEST_RR( op "cs" s " r2, r",3, VAL1,", r",2, val, ", lsr #4") \ - TEST_RR( op "cc" s " r3, r",3, VAL1,", r",2, val, ", asr #5") \ - TEST_RR( op "mi" s " r4, r",5, VAL1,", r",2, N(val),", asr #6") \ - TEST_RR( op "pl" s " r5, r",5, VAL1,", r",2, val, ", ror #7") \ - TEST_RR( op "vs" s " r6, r",7, VAL1,", r",2, val, ", rrx") \ - TEST_R( op "vc" s " r6, r",7, VAL1,", pc, lsl #3") \ - TEST_R( op "vc" s " r6, r",7, VAL1,", sp, lsr #4") \ - TEST_R( op "vc" s " r6, pc, r",7, VAL1,", asr #5") \ - TEST_R( op "vc" s " r6, sp, r",7, VAL1,", ror #6") \ - TEST_RRR( op "hi" 
s " r8, r",9, VAL1,", r",14,val, ", lsl r",0, 3,"")\ - TEST_RRR( op "ls" s " r9, r",9, VAL1,", r",14,val, ", lsr r",7, 4,"")\ - TEST_RRR( op "ge" s " r10, r",11,VAL1,", r",14,val, ", asr r",7, 5,"")\ - TEST_RRR( op "lt" s " r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\ - TEST_RR( op "gt" s " r12, r13" ", r",14,val, ", ror r",14,7,"")\ - TEST_RR( op "le" s " r14, r",0, val, ", r13" ", lsl r",14,8,"")\ - TEST_R( op "eq" s " r0, r",11,VAL1,", #0xf5") \ - TEST_R( op "ne" s " r11, r",0, VAL1,", #0xf5000000") \ + TEST_RR( op s "eq r0, r",1, VAL1,", r",2, val, "") \ + TEST_RR( op s "ne r1, r",1, VAL1,", r",2, val, ", lsl #3") \ + TEST_RR( op s "cs r2, r",3, VAL1,", r",2, val, ", lsr #4") \ + TEST_RR( op s "cc r3, r",3, VAL1,", r",2, val, ", asr #5") \ + TEST_RR( op s "mi r4, r",5, VAL1,", r",2, N(val),", asr #6") \ + TEST_RR( op s "pl r5, r",5, VAL1,", r",2, val, ", ror #7") \ + TEST_RR( op s "vs r6, r",7, VAL1,", r",2, val, ", rrx") \ + TEST_R( op s "vc r6, r",7, VAL1,", pc, lsl #3") \ + TEST_R( op s "vc r6, r",7, VAL1,", sp, lsr #4") \ + TEST_R( op s "vc r6, pc, r",7, VAL1,", asr #5") \ + TEST_R( op s "vc r6, sp, r",7, VAL1,", ror #6") \ + TEST_RRR( op s "hi r8, r",9, VAL1,", r",14,val, ", lsl r",0, 3,"")\ + TEST_RRR( op s "ls r9, r",9, VAL1,", r",14,val, ", lsr r",7, 4,"")\ + TEST_RRR( op s "ge r10, r",11,VAL1,", r",14,val, ", asr r",7, 5,"")\ + TEST_RRR( op s "lt r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\ + TEST_RR( op s "gt r12, r13" ", r",14,val, ", ror r",14,7,"")\ + TEST_RR( op s "le r14, r",0, val, ", r13" ", lsl r",14,8,"")\ + TEST_R( op s "eq r0, r",11,VAL1,", #0xf5") \ + TEST_R( op s "ne r11, r",0, VAL1,", #0xf5000000") \ TEST_R( op s " r7, r",8, VAL2,", #0x000af000") \ TEST( op s " r4, pc" ", #0x00005a00") @@ -104,23 +104,23 @@ void kprobe_arm_test_cases(void) TEST_R( op " r",8, VAL2,", #0x000af000") #define _DATA_PROCESSING_DM(op,s,val) \ - TEST_R( op "eq" s " r0, r",1, val, "") \ - TEST_R( op "ne" s " r1, r",1, val, ", lsl #3") \ - TEST_R( op "cs" s " r2, r",3, val, ", lsr #4") \ - TEST_R( op "cc" s " r3, r",3, val, ", asr #5") \ - TEST_R( op "mi" s " r4, r",5, N(val),", asr #6") \ - TEST_R( op "pl" s " r5, r",5, val, ", ror #7") \ - TEST_R( op "vs" s " r6, r",10,val, ", rrx") \ - TEST( op "vs" s " r7, pc, lsl #3") \ - TEST( op "vs" s " r7, sp, lsr #4") \ - TEST_RR( op "vc" s " r8, r",7, val, ", lsl r",0, 3,"") \ - TEST_RR( op "hi" s " r9, r",9, val, ", lsr r",7, 4,"") \ - TEST_RR( op "ls" s " r10, r",9, val, ", asr r",7, 5,"") \ - TEST_RR( op "ge" s " r11, r",11,N(val),", asr r",7, 6,"") \ - TEST_RR( op "lt" s " r12, r",11,val, ", ror r",14,7,"") \ - TEST_R( op "gt" s " r14, r13" ", lsl r",14,8,"") \ - TEST( op "eq" s " r0, #0xf5") \ - TEST( op "ne" s " r11, #0xf5000000") \ + TEST_R( op s "eq r0, r",1, val, "") \ + TEST_R( op s "ne r1, r",1, val, ", lsl #3") \ + TEST_R( op s "cs r2, r",3, val, ", lsr #4") \ + TEST_R( op s "cc r3, r",3, val, ", asr #5") \ + TEST_R( op s "mi r4, r",5, N(val),", asr #6") \ + TEST_R( op s "pl r5, r",5, val, ", ror #7") \ + TEST_R( op s "vs r6, r",10,val, ", rrx") \ + TEST( op s "vs r7, pc, lsl #3") \ + TEST( op s "vs r7, sp, lsr #4") \ + TEST_RR( op s "vc r8, r",7, val, ", lsl r",0, 3,"") \ + TEST_RR( op s "hi r9, r",9, val, ", lsr r",7, 4,"") \ + TEST_RR( op s "ls r10, r",9, val, ", asr r",7, 5,"") \ + TEST_RR( op s "ge r11, r",11,N(val),", asr r",7, 6,"") \ + TEST_RR( op s "lt r12, r",11,val, ", ror r",14,7,"") \ + TEST_R( op s "gt r14, r13" ", lsl r",14,8,"") \ + TEST( op s "eq r0, #0xf5") \ + TEST( op s "ne r11, #0xf5000000") \ 
TEST( op s " r7, #0x000af000") \ TEST( op s " r4, #0x00005a00") @@ -166,10 +166,10 @@ void kprobe_arm_test_cases(void) /* Data-processing with PC as a target and status registers updated */ TEST_UNSUPPORTED("movs pc, r1") - TEST_UNSUPPORTED("movs pc, r1, lsl r2") + TEST_UNSUPPORTED(__inst_arm(0xe1b0f211) " @movs pc, r1, lsl r2") TEST_UNSUPPORTED("movs pc, #0x10000") TEST_UNSUPPORTED("adds pc, lr, r1") - TEST_UNSUPPORTED("adds pc, lr, r1, lsl r2") + TEST_UNSUPPORTED(__inst_arm(0xe09ef211) " @adds pc, lr, r1, lsl r2") TEST_UNSUPPORTED("adds pc, lr, #4") /* Data-processing with SP as target */ @@ -352,7 +352,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe000029f) " @ mul r0, pc, r2") TEST_UNSUPPORTED(__inst_arm(0xe0000f91) " @ mul r0, r1, pc") TEST_RR( "muls r0, r",1, VAL1,", r",2, VAL2,"") - TEST_RR( "mullss r7, r",8, VAL2,", r",9, VAL2,"") + TEST_RR( "mulsls r7, r",8, VAL2,", r",9, VAL2,"") TEST_R( "muls lr, r",4, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe01f0291) " @ muls pc, r1, r2") @@ -361,7 +361,7 @@ void kprobe_arm_test_cases(void) TEST_RR( "mla lr, r",1, VAL2,", r",2, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe02f3291) " @ mla pc, r1, r2, r3") TEST_RRR( "mlas r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") - TEST_RRR( "mlahis r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") + TEST_RRR( "mlashi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "mlas lr, r",1, VAL2,", r",2, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe03f3291) " @ mlas pc, r1, r2, r3") @@ -394,7 +394,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe081f392) " @ umull pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe08f1392) " @ umull r1, pc, r2, r3") TEST_RR( "umulls r0, r1, r",2, VAL1,", r",3, VAL2,"") - TEST_RR( "umulllss r7, r8, r",9, VAL2,", r",10, VAL1,"") + TEST_RR( "umullsls r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_R( "umulls lr, r12, r",11,VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe091f392) " @ umulls pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe09f1392) " @ umulls r1, pc, r2, r3") @@ -405,7 +405,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0af1392) " @ umlal pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0a1f392) " @ umlal r1, pc, r2, r3") TEST_RRRR( "umlals r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) - TEST_RRRR( "umlalles r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) + TEST_RRRR( "umlalsle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRR( "umlals r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13") TEST_UNSUPPORTED(__inst_arm(0xe0bf1392) " @ umlals pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0b1f392) " @ umlals r1, pc, r2, r3") @@ -416,7 +416,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0c1f392) " @ smull pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0cf1392) " @ smull r1, pc, r2, r3") TEST_RR( "smulls r0, r1, r",2, VAL1,", r",3, VAL2,"") - TEST_RR( "smulllss r7, r8, r",9, VAL2,", r",10, VAL1,"") + TEST_RR( "smullsls r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_R( "smulls lr, r12, r",11,VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe0d1f392) " @ smulls pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0df1392) " @ smulls r1, pc, r2, r3") @@ -427,7 +427,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0ef1392) " @ smlal pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0e1f392) " @ smlal r1, pc, r2, r3") TEST_RRRR( "smlals r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) - TEST_RRRR( "smlalles r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) + 
TEST_RRRR( "smlalsle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRR( "smlals r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13") TEST_UNSUPPORTED(__inst_arm(0xe0ff1392) " @ smlals pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0f0f392) " @ smlals r0, pc, r2, r3") @@ -450,7 +450,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe10f0091) " @ swp r0, r1, [pc]") #if __LINUX_ARM_ARCH__ < 6 TEST_RP("swpb lr, r",7,VAL2,", [r",8,0,"]") - TEST_R( "swpvsb r0, r",1,VAL1,", [sp]") + TEST_R( "swpbvs r0, r",1,VAL1,", [sp]") #else TEST_UNSUPPORTED(__inst_arm(0xe148e097) " @ swpb lr, r7, [r8]") TEST_UNSUPPORTED(__inst_arm(0x614d0091) " @ swpvsb r0, r1, [sp]") @@ -477,11 +477,11 @@ void kprobe_arm_test_cases(void) TEST_GROUP("Extra load/store instructions") TEST_RPR( "strh r",0, VAL1,", [r",1, 48,", -r",2, 24,"]") - TEST_RPR( "streqh r",14,VAL2,", [r",11,0, ", r",12, 48,"]") - TEST_UNSUPPORTED( "streqh r14, [r13, r12]") - TEST_UNSUPPORTED( "streqh r14, [r12, r13]") + TEST_RPR( "strheq r",14,VAL2,", [r",11,0, ", r",12, 48,"]") + TEST_UNSUPPORTED( "strheq r14, [r13, r12]") + TEST_UNSUPPORTED( "strheq r14, [r12, r13]") TEST_RPR( "strh r",1, VAL1,", [r",2, 24,", r",3, 48,"]!") - TEST_RPR( "strneh r",12,VAL2,", [r",11,48,", -r",10,24,"]!") + TEST_RPR( "strhne r",12,VAL2,", [r",11,48,", -r",10,24,"]!") TEST_RPR( "strh r",2, VAL1,", [r",3, 24,"], r",4, 48,"") TEST_RPR( "strh r",10,VAL2,", [r",9, 48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1afc0ba) " @ strh r12, [pc, r10]!") @@ -489,9 +489,9 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe089a0bf) " @ strh r10, [r9], pc") TEST_PR( "ldrh r0, [r",0, 48,", -r",2, 24,"]") - TEST_PR( "ldrcsh r14, [r",13,0, ", r",12, 48,"]") + TEST_PR( "ldrhcs r14, [r",13,0, ", r",12, 48,"]") TEST_PR( "ldrh r1, [r",2, 24,", r",3, 48,"]!") - TEST_PR( "ldrcch r12, [r",11,48,", -r",10,24,"]!") + TEST_PR( "ldrhcc r12, [r",11,48,", -r",10,24,"]!") TEST_PR( "ldrh r2, [r",3, 24,"], r",4, 48,"") TEST_PR( "ldrh r10, [r",9, 48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1bfc0ba) " @ ldrh r12, [pc, r10]!") @@ -499,9 +499,9 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe099a0bf) " @ ldrh r10, [r9], pc") TEST_RP( "strh r",0, VAL1,", [r",1, 24,", #-2]") - TEST_RP( "strmih r",14,VAL2,", [r",13,0, ", #2]") + TEST_RP( "strhmi r",14,VAL2,", [r",13,0, ", #2]") TEST_RP( "strh r",1, VAL1,", [r",2, 24,", #4]!") - TEST_RP( "strplh r",12,VAL2,", [r",11,24,", #-4]!") + TEST_RP( "strhpl r",12,VAL2,", [r",11,24,", #-4]!") TEST_RP( "strh r",2, VAL1,", [r",3, 24,"], #48") TEST_RP( "strh r",10,VAL2,", [r",9, 64,"], #-48") TEST_RP( "strh r",3, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") @@ -511,9 +511,9 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0c9f3b0) " @ strh pc, [r9], #48") TEST_P( "ldrh r0, [r",0, 24,", #-2]") - TEST_P( "ldrvsh r14, [r",13,0, ", #2]") + TEST_P( "ldrhvs r14, [r",13,0, ", #2]") TEST_P( "ldrh r1, [r",2, 24,", #4]!") - TEST_P( "ldrvch r12, [r",11,24,", #-4]!") + TEST_P( "ldrhvc r12, [r",11,24,", #-4]!") TEST_P( "ldrh r2, [r",3, 24,"], #48") TEST_P( "ldrh r10, [r",9, 64,"], #-48") TEST( "ldrh r0, [pc, #0]") @@ -521,18 +521,18 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0d9f3b0) " @ ldrh pc, [r9], #48") TEST_PR( "ldrsb r0, [r",0, 48,", -r",2, 24,"]") - TEST_PR( "ldrhisb r14, [r",13,0,", r",12, 48,"]") + TEST_PR( "ldrsbhi r14, [r",13,0,", r",12, 48,"]") TEST_PR( "ldrsb r1, [r",2, 24,", r",3, 48,"]!") - TEST_PR( "ldrlssb r12, [r",11,48,", 
-r",10,24,"]!") + TEST_PR( "ldrsbls r12, [r",11,48,", -r",10,24,"]!") TEST_PR( "ldrsb r2, [r",3, 24,"], r",4, 48,"") TEST_PR( "ldrsb r10, [r",9, 48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1bfc0da) " @ ldrsb r12, [pc, r10]!") TEST_UNSUPPORTED(__inst_arm(0xe099f0db) " @ ldrsb pc, [r9], r11") TEST_P( "ldrsb r0, [r",0, 24,", #-1]") - TEST_P( "ldrgesb r14, [r",13,0, ", #1]") + TEST_P( "ldrsbge r14, [r",13,0, ", #1]") TEST_P( "ldrsb r1, [r",2, 24,", #4]!") - TEST_P( "ldrltsb r12, [r",11,24,", #-4]!") + TEST_P( "ldrsblt r12, [r",11,24,", #-4]!") TEST_P( "ldrsb r2, [r",3, 24,"], #48") TEST_P( "ldrsb r10, [r",9, 64,"], #-48") TEST( "ldrsb r0, [pc, #0]") @@ -540,18 +540,18 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0d9f3d0) " @ ldrsb pc, [r9], #48") TEST_PR( "ldrsh r0, [r",0, 48,", -r",2, 24,"]") - TEST_PR( "ldrgtsh r14, [r",13,0, ", r",12, 48,"]") + TEST_PR( "ldrshgt r14, [r",13,0, ", r",12, 48,"]") TEST_PR( "ldrsh r1, [r",2, 24,", r",3, 48,"]!") - TEST_PR( "ldrlesh r12, [r",11,48,", -r",10,24,"]!") + TEST_PR( "ldrshle r12, [r",11,48,", -r",10,24,"]!") TEST_PR( "ldrsh r2, [r",3, 24,"], r",4, 48,"") TEST_PR( "ldrsh r10, [r",9, 48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1bfc0fa) " @ ldrsh r12, [pc, r10]!") TEST_UNSUPPORTED(__inst_arm(0xe099f0fb) " @ ldrsh pc, [r9], r11") TEST_P( "ldrsh r0, [r",0, 24,", #-1]") - TEST_P( "ldreqsh r14, [r",13,0 ,", #1]") + TEST_P( "ldrsheq r14, [r",13,0 ,", #1]") TEST_P( "ldrsh r1, [r",2, 24,", #4]!") - TEST_P( "ldrnesh r12, [r",11,24,", #-4]!") + TEST_P( "ldrshne r12, [r",11,24,", #-4]!") TEST_P( "ldrsh r2, [r",3, 24,"], #48") TEST_P( "ldrsh r10, [r",9, 64,"], #-48") TEST( "ldrsh r0, [pc, #0]") @@ -571,30 +571,30 @@ void kprobe_arm_test_cases(void) #if __LINUX_ARM_ARCH__ >= 5 TEST_RPR( "strd r",0, VAL1,", [r",1, 48,", -r",2,24,"]") - TEST_RPR( "strccd r",8, VAL2,", [r",11,0, ", r",12,48,"]") - TEST_UNSUPPORTED( "strccd r8, [r13, r12]") - TEST_UNSUPPORTED( "strccd r8, [r12, r13]") + TEST_RPR( "strdcc r",8, VAL2,", [r",11,0, ", r",12,48,"]") + TEST_UNSUPPORTED( "strdcc r8, [r13, r12]") + TEST_UNSUPPORTED( "strdcc r8, [r12, r13]") TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!") - TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!") - TEST_RPR( "strd r",2, VAL1,", [r",5, 24,"], r",4,48,"") - TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"") + TEST_RPR( "strdcs r",12,VAL2,", r13, [r",11,48,", -r",10,24,"]!") + TEST_RPR( "strd r",2, VAL1,", r3, [r",5, 24,"], r",4,48,"") + TEST_RPR( "strd r",10,VAL2,", r11, [r",9, 48,"], -r",7,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1afc0fa) " @ strd r12, [pc, r10]!") TEST_PR( "ldrd r0, [r",0, 48,", -r",2,24,"]") - TEST_PR( "ldrmid r8, [r",13,0, ", r",12,48,"]") + TEST_PR( "ldrdmi r8, [r",13,0, ", r",12,48,"]") TEST_PR( "ldrd r4, [r",2, 24,", r",3, 48,"]!") - TEST_PR( "ldrpld r6, [r",11,48,", -r",10,24,"]!") - TEST_PR( "ldrd r2, [r",5, 24,"], r",4,48,"") - TEST_PR( "ldrd r10, [r",9,48,"], -r",7,24,"") + TEST_PR( "ldrdpl r6, [r",11,48,", -r",10,24,"]!") + TEST_PR( "ldrd r2, r3, [r",5, 24,"], r",4,48,"") + TEST_PR( "ldrd r10, r11, [r",9,48,"], -r",7,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1afc0da) " @ ldrd r12, [pc, r10]!") TEST_UNSUPPORTED(__inst_arm(0xe089f0db) " @ ldrd pc, [r9], r11") TEST_UNSUPPORTED(__inst_arm(0xe089e0db) " @ ldrd lr, [r9], r11") TEST_UNSUPPORTED(__inst_arm(0xe089c0df) " @ ldrd r12, [r9], pc") TEST_RP( "strd r",0, VAL1,", [r",1, 24,", #-8]") - TEST_RP( "strvsd r",8, VAL2,", [r",13,0, ", #8]") + TEST_RP( "strdvs r",8, VAL2,", [r",13,0, ", #8]") TEST_RP( "strd 
r",4, VAL1,", [r",2, 24,", #16]!") - TEST_RP( "strvcd r",12,VAL2,", [r",11,24,", #-16]!") + TEST_RP( "strdvc r",12,VAL2,", r13, [r",11,24,", #-16]!") TEST_RP( "strd r",2, VAL1,", [r",4, 24,"], #48") TEST_RP( "strd r",10,VAL2,", [r",9, 64,"], #-48") TEST_RP( "strd r",6, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") @@ -603,9 +603,9 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe1efc3f0) " @ strd r12, [pc, #48]!") TEST_P( "ldrd r0, [r",0, 24,", #-8]") - TEST_P( "ldrhid r8, [r",13,0, ", #8]") + TEST_P( "ldrdhi r8, [r",13,0, ", #8]") TEST_P( "ldrd r4, [r",2, 24,", #16]!") - TEST_P( "ldrlsd r6, [r",11,24,", #-16]!") + TEST_P( "ldrdls r6, [r",11,24,", #-16]!") TEST_P( "ldrd r2, [r",5, 24,"], #48") TEST_P( "ldrd r10, [r",9,6,"], #-48") TEST_UNSUPPORTED(__inst_arm(0xe1efc3d0) " @ ldrd r12, [pc, #48]!") @@ -1084,63 +1084,63 @@ void kprobe_arm_test_cases(void) TEST_GROUP("Branch, branch with link, and block data transfer") TEST_P( "stmda r",0, 16*4,", {r0}") - TEST_P( "stmeqda r",4, 16*4,", {r0-r15}") - TEST_P( "stmneda r",8, 16*4,"!, {r8-r15}") + TEST_P( "stmdaeq r",4, 16*4,", {r0-r15}") + TEST_P( "stmdane r",8, 16*4,"!, {r8-r15}") TEST_P( "stmda r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmda r",13,0, "!, {pc}") TEST_P( "ldmda r",0, 16*4,", {r0}") - TEST_BF_P("ldmcsda r",4, 15*4,", {r0-r15}") - TEST_BF_P("ldmccda r",7, 15*4,"!, {r8-r15}") + TEST_BF_P("ldmdacs r",4, 15*4,", {r0-r15}") + TEST_BF_P("ldmdacc r",7, 15*4,"!, {r8-r15}") TEST_P( "ldmda r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmda r",14,15*4,"!, {pc}") TEST_P( "stmia r",0, 16*4,", {r0}") - TEST_P( "stmmiia r",4, 16*4,", {r0-r15}") - TEST_P( "stmplia r",8, 16*4,"!, {r8-r15}") + TEST_P( "stmiami r",4, 16*4,", {r0-r15}") + TEST_P( "stmiapl r",8, 16*4,"!, {r8-r15}") TEST_P( "stmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmia r",14,0, "!, {pc}") TEST_P( "ldmia r",0, 16*4,", {r0}") - TEST_BF_P("ldmvsia r",4, 0, ", {r0-r15}") - TEST_BF_P("ldmvcia r",7, 8*4, "!, {r8-r15}") + TEST_BF_P("ldmiavs r",4, 0, ", {r0-r15}") + TEST_BF_P("ldmiavc r",7, 8*4, "!, {r8-r15}") TEST_P( "ldmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmia r",14,15*4,"!, {pc}") TEST_P( "stmdb r",0, 16*4,", {r0}") - TEST_P( "stmhidb r",4, 16*4,", {r0-r15}") - TEST_P( "stmlsdb r",8, 16*4,"!, {r8-r15}") + TEST_P( "stmdbhi r",4, 16*4,", {r0-r15}") + TEST_P( "stmdbls r",8, 16*4,"!, {r8-r15}") TEST_P( "stmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmdb r",13,4, "!, {pc}") TEST_P( "ldmdb r",0, 16*4,", {r0}") - TEST_BF_P("ldmgedb r",4, 16*4,", {r0-r15}") - TEST_BF_P("ldmltdb r",7, 16*4,"!, {r8-r15}") + TEST_BF_P("ldmdbge r",4, 16*4,", {r0-r15}") + TEST_BF_P("ldmdblt r",7, 16*4,"!, {r8-r15}") TEST_P( "ldmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmdb r",14,16*4,"!, {pc}") TEST_P( "stmib r",0, 16*4,", {r0}") - TEST_P( "stmgtib r",4, 16*4,", {r0-r15}") - TEST_P( "stmleib r",8, 16*4,"!, {r8-r15}") + TEST_P( "stmibgt r",4, 16*4,", {r0-r15}") + TEST_P( "stmible r",8, 16*4,"!, {r8-r15}") TEST_P( "stmib r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmib r",13,-4, "!, {pc}") TEST_P( "ldmib r",0, 16*4,", {r0}") - TEST_BF_P("ldmeqib r",4, -4,", {r0-r15}") - TEST_BF_P("ldmneib r",7, 7*4,"!, {r8-r15}") + TEST_BF_P("ldmibeq r",4, -4,", {r0-r15}") + TEST_BF_P("ldmibne r",7, 7*4,"!, {r8-r15}") TEST_P( "ldmib r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmib r",14,14*4,"!, {pc}") TEST_P( "stmdb r",13,16*4,"!, {r3-r12,lr}") - TEST_P( "stmeqdb r",13,16*4,"!, {r3-r12}") - 
TEST_P( "stmnedb r",2, 16*4,", {r3-r12,lr}") + TEST_P( "stmdbeq r",13,16*4,"!, {r3-r12}") + TEST_P( "stmdbne r",2, 16*4,", {r3-r12,lr}") TEST_P( "stmdb r",13,16*4,"!, {r2-r12,lr}") TEST_P( "stmdb r",0, 16*4,", {r0-r12}") TEST_P( "stmdb r",0, 16*4,", {r0-r12,lr}") TEST_BF_P("ldmia r",13,5*4, "!, {r3-r12,pc}") - TEST_P( "ldmccia r",13,5*4, "!, {r3-r12}") - TEST_BF_P("ldmcsia r",2, 5*4, "!, {r3-r12,pc}") + TEST_P( "ldmiacc r",13,5*4, "!, {r3-r12}") + TEST_BF_P("ldmiacs r",2, 5*4, "!, {r3-r12,pc}") TEST_BF_P("ldmia r",13,4*4, "!, {r2-r12,pc}") TEST_P( "ldmia r",0, 16*4,", {r0-r12}") TEST_P( "ldmia r",0, 16*4,", {r0-r12,lr}") @@ -1174,80 +1174,80 @@ void kprobe_arm_test_cases(void) #define TEST_COPROCESSOR(code) TEST_UNSUPPORTED(code) #define COPROCESSOR_INSTRUCTIONS_ST_LD(two,cc) \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #4]") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #-4]") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #4]!") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #-4]!") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13], #4") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13], #-4") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13], {1}") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #4]") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #-4]") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #4]!") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #-4]!") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], #4") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], #-4") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], {1}") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #4]") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #-4]") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #4]!") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #-4]!") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], #4") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], #-4") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], {1}") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #4]") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #-4]") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #4]!") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #-4]!") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], #4") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], #-4") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], {1}") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #4]") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #-4]") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #4]!") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #-4]!") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13], #4") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13], #-4") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13], {1}") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #4]") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #-4]") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #4]!") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #-4]!") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13], #4") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13], #-4") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13], {1}") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #4]") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #-4]") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #4]!") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #-4]!") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13], #4") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13], #-4") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13], {1}") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #4]") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #-4]") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #4]!") \ + 
TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #-4]!") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13], #4") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13], #-4") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13], {1}") \ \ - TEST_COPROCESSOR( "stc"two" 0, cr0, [r15, #4]") \ - TEST_COPROCESSOR( "stc"two" 0, cr0, [r15, #-4]") \ + TEST_COPROCESSOR( "stc"two" p0, cr0, [r15, #4]") \ + TEST_COPROCESSOR( "stc"two" p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##daf0001) " @ stc"two" 0, cr0, [r15, #4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##d2f0001) " @ stc"two" 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##caf0001) " @ stc"two" 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c2f0001) " @ stc"two" 0, cr0, [r15], #-4") \ - TEST_COPROCESSOR( "stc"two" 0, cr0, [r15], {1}") \ - TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15, #4]") \ - TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15, #-4]") \ + TEST_COPROCESSOR( "stc"two" p0, cr0, [r15], {1}") \ + TEST_COPROCESSOR( "stc"two"l p0, cr0, [r15, #4]") \ + TEST_COPROCESSOR( "stc"two"l p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##def0001) " @ stc"two"l 0, cr0, [r15, #4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##d6f0001) " @ stc"two"l 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##cef0001) " @ stc"two"l 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c6f0001) " @ stc"two"l 0, cr0, [r15], #-4") \ - TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15], {1}") \ - TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15, #4]") \ - TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15, #-4]") \ + TEST_COPROCESSOR( "stc"two"l p0, cr0, [r15], {1}") \ + TEST_COPROCESSOR( "ldc"two" p0, cr0, [r15, #4]") \ + TEST_COPROCESSOR( "ldc"two" p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##dbf0001) " @ ldc"two" 0, cr0, [r15, #4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##d3f0001) " @ ldc"two" 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##cbf0001) " @ ldc"two" 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c3f0001) " @ ldc"two" 0, cr0, [r15], #-4") \ - TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15], {1}") \ - TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15, #4]") \ - TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15, #-4]") \ + TEST_COPROCESSOR( "ldc"two" p0, cr0, [r15], {1}") \ + TEST_COPROCESSOR( "ldc"two"l p0, cr0, [r15, #4]") \ + TEST_COPROCESSOR( "ldc"two"l p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##dff0001) " @ ldc"two"l 0, cr0, [r15, #4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##d7f0001) " @ ldc"two"l 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##cff0001) " @ ldc"two"l 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c7f0001) " @ ldc"two"l 0, cr0, [r15], #-4") \ - TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15], {1}") + TEST_COPROCESSOR( "ldc"two"l p0, cr0, [r15], {1}") #define COPROCESSOR_INSTRUCTIONS_MC_MR(two,cc) \ \ - TEST_COPROCESSOR( "mcrr"two" 0, 15, r0, r14, cr0") \ - TEST_COPROCESSOR( "mcrr"two" 15, 0, r14, r0, cr15") \ + TEST_COPROCESSOR( "mcrr"two" p0, 15, r0, r14, cr0") \ + TEST_COPROCESSOR( "mcrr"two" p15, 0, r14, r0, cr15") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c4f00f0) " @ mcrr"two" 0, 15, r0, r15, cr0") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c40ff0f) " @ mcrr"two" 15, 0, r15, r0, cr15") \ - TEST_COPROCESSOR( "mrrc"two" 0, 15, r0, r14, cr0") \ - TEST_COPROCESSOR( "mrrc"two" 15, 0, r14, r0, cr15") \ + TEST_COPROCESSOR( "mrrc"two" p0, 15, r0, r14, cr0") \ + TEST_COPROCESSOR( "mrrc"two" p15, 0, r14, r0, cr15") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c5f00f0) " @ mrrc"two" 0, 15, r0, r15, cr0") \ 
TEST_UNSUPPORTED(__inst_arm(0x##cc##c50ff0f) " @ mrrc"two" 15, 0, r15, r0, cr15") \ - TEST_COPROCESSOR( "cdp"two" 15, 15, cr15, cr15, cr15, 7") \ - TEST_COPROCESSOR( "cdp"two" 0, 0, cr0, cr0, cr0, 0") \ - TEST_COPROCESSOR( "mcr"two" 15, 7, r15, cr15, cr15, 7") \ - TEST_COPROCESSOR( "mcr"two" 0, 0, r0, cr0, cr0, 0") \ - TEST_COPROCESSOR( "mrc"two" 15, 7, r15, cr15, cr15, 7") \ - TEST_COPROCESSOR( "mrc"two" 0, 0, r0, cr0, cr0, 0") + TEST_COPROCESSOR( "cdp"two" p15, 15, cr15, cr15, cr15, 7") \ + TEST_COPROCESSOR( "cdp"two" p0, 0, cr0, cr0, cr0, 0") \ + TEST_COPROCESSOR( "mcr"two" p15, 7, r15, cr15, cr15, 7") \ + TEST_COPROCESSOR( "mcr"two" p0, 0, r0, cr0, cr0, 0") \ + TEST_COPROCESSOR( "mrc"two" p15, 7, r14, cr15, cr15, 7") \ + TEST_COPROCESSOR( "mrc"two" p0, 0, r0, cr0, cr0, 0") COPROCESSOR_INSTRUCTIONS_ST_LD("",e) #if __LINUX_ARM_ARCH__ >= 5 diff --git a/arch/arm/probes/kprobes/test-core.h b/arch/arm/probes/kprobes/test-core.h index 19a5b2add41e..f1d5583e7bbb 100644 --- a/arch/arm/probes/kprobes/test-core.h +++ b/arch/arm/probes/kprobes/test-core.h @@ -108,6 +108,7 @@ struct test_arg_end { #define TESTCASE_START(title) \ __asm__ __volatile__ ( \ + ".syntax unified \n\t" \ "bl __kprobes_test_case_start \n\t" \ ".pushsection .rodata \n\t" \ "10: \n\t" \ diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile index 3654f979851b..87de1f63f649 100644 --- a/arch/arm/tools/Makefile +++ b/arch/arm/tools/Makefile @@ -8,16 +8,15 @@ gen := arch/$(ARCH)/include/generated kapi := $(gen)/asm uapi := $(gen)/uapi/asm -syshdr := $(srctree)/$(src)/syscallhdr.sh +syshdr := $(srctree)/scripts/syscallhdr.sh sysnr := $(srctree)/$(src)/syscallnr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh syscall := $(src)/syscall.tbl gen-y := $(gen)/calls-oabi.S gen-y += $(gen)/calls-eabi.S kapi-hdrs-y := $(kapi)/unistd-nr.h kapi-hdrs-y += $(kapi)/mach-types.h -uapi-hdrs-y := $(uapi)/unistd-common.h uapi-hdrs-y += $(uapi)/unistd-oabi.h uapi-hdrs-y += $(uapi)/unistd-eabi.h @@ -41,28 +40,21 @@ $(kapi)/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE $(call if_changed,gen_mach) quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abi_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '__NR_SYSCALL_BASE' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --abis $(abis) \ + --offset __NR_SYSCALL_BASE $< $@ quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abi_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@ quiet_cmd_sysnr = SYSNR $@ cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \ '$(syshdr_abi_$(basetarget))' -syshdr_abi_unistd-common := common -$(uapi)/unistd-common.h: $(syscall) $(syshdr) FORCE - $(call if_changed,syshdr) - -syshdr_abi_unistd-oabi := oabi +$(uapi)/unistd-oabi.h: abis := common,oabi $(uapi)/unistd-oabi.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -syshdr_abi_unistd-eabi := eabi +$(uapi)/unistd-eabi.h: abis := common,eabi $(uapi)/unistd-eabi.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) @@ -70,10 +62,10 @@ sysnr_abi_unistd-nr := common,oabi,eabi,compat $(kapi)/unistd-nr.h: $(syscall) $(sysnr) FORCE $(call if_changed,sysnr) -systbl_abi_calls-oabi := common,oabi +$(gen)/calls-oabi.S: abis := common,oabi $(gen)/calls-oabi.S: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abi_calls-eabi := common,eabi +$(gen)/calls-eabi.S: abis := common,eabi $(gen)/calls-eabi.S: $(syscall) $(systbl) FORCE $(call 
if_changed,systbl) diff --git a/arch/arm/tools/syscallhdr.sh b/arch/arm/tools/syscallhdr.sh deleted file mode 100644 index 6b2f25cdd721..000000000000 --- a/arch/arm/tools/syscallhdr.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_ASM_ARM_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -if echo $out | grep -q uapi; then - fileguard="_UAPI$fileguard" -fi -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - echo "#ifndef ${fileguard}" - echo "#define ${fileguard} 1" - echo "" - - while read nr abi name entry ; do - if [ -z "$offset" ]; then - echo "#define __NR_${prefix}${name} $nr" - else - echo "#define __NR_${prefix}${name} ($offset + $nr)" - fi - done - - echo "" - echo "#endif /* ${fileguard} */" -) > "$out" diff --git a/arch/arm/tools/syscalltbl.sh b/arch/arm/tools/syscalltbl.sh deleted file mode 100644 index ae7e93cfbfd3..000000000000 --- a/arch/arm/tools/syscalltbl.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - while read nr abi name entry compat; do - if [ "$abi" = "eabi" -a -n "$compat" ]; then - echo "$in: error: a compat entry for an EABI syscall ($name) makes no sense" >&2 - exit 1 - fi - - if [ -n "$entry" ]; then - if [ -z "$compat" ]; then - echo "NATIVE($nr, $entry)" - else - echo "COMPAT($nr, $entry, $compat)" - fi - fi - done -) > "$out" diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index e1b12b242a32..f8f07469d259 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -152,7 +152,7 @@ static int __init xen_mm_init(void) struct gnttab_cache_flush cflush; if (!xen_swiotlb_detect()) return 0; - xen_swiotlb_init(1, false); + xen_swiotlb_init(); cflush.op = 0; cflush.a.dev_bus_addr = 0; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7f2a80091337..9f1d8566bbf9 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -11,6 +11,12 @@ config ARM64 select ACPI_PPTT if ACPI select ARCH_HAS_DEBUG_WX select ARCH_BINFMT_ELF_STATE + select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION + select ARCH_ENABLE_MEMORY_HOTPLUG + select ARCH_ENABLE_MEMORY_HOTREMOVE + select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2 + select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE + select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DMA_PREP_COHERENT @@ -72,6 +78,7 @@ config ARM64 select ARCH_USE_QUEUED_SPINLOCKS select ARCH_USE_SYM_ANNOTATIONS select ARCH_SUPPORTS_DEBUG_PAGEALLOC + select ARCH_SUPPORTS_HUGETLBFS select ARCH_SUPPORTS_MEMORY_FAILURE select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK select ARCH_SUPPORTS_LTO_CLANG if CPU_LITTLE_ENDIAN @@ -163,7 +170,6 @@ config ARM64 select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_CONTEXT_TRACKING - select HAVE_DEBUG_BUGVERBOSE select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS select HAVE_DYNAMIC_FTRACE @@ -213,6 +219,7 @@ config ARM64 select SWIOTLB select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK + select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD help ARM 64-bit (AArch64) Linux support. 
@@ -308,10 +315,7 @@ config ZONE_DMA32 bool "Support DMA32 zone" if EXPERT default y -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - -config ARCH_ENABLE_MEMORY_HOTREMOVE +config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE def_bool y config SMP @@ -1056,32 +1060,15 @@ source "kernel/Kconfig.hz" config ARCH_SPARSEMEM_ENABLE def_bool y select SPARSEMEM_VMEMMAP_ENABLE - -config ARCH_SPARSEMEM_DEFAULT - def_bool ARCH_SPARSEMEM_ENABLE - -config ARCH_SELECT_MEMORY_MODEL - def_bool ARCH_SPARSEMEM_ENABLE - -config ARCH_FLATMEM_ENABLE - def_bool !NUMA + select SPARSEMEM_VMEMMAP config HW_PERF_EVENTS def_bool y depends on ARM_PMU -config SYS_SUPPORTS_HUGETLBFS - def_bool y - -config ARCH_HAS_CACHE_LINE_SIZE - def_bool y - config ARCH_HAS_FILTER_PGPROT def_bool y -config ARCH_ENABLE_SPLIT_PMD_PTLOCK - def_bool y if PGTABLE_LEVELS > 2 - # Supported by clang >= 7.0 config CC_HAVE_SHADOW_CALL_STACK def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18) @@ -1923,14 +1910,6 @@ config SYSVIPC_COMPAT def_bool y depends on COMPAT && SYSVIPC -config ARCH_ENABLE_HUGEPAGE_MIGRATION - def_bool y - depends on HUGETLB_PAGE && MIGRATION - -config ARCH_ENABLE_THP_MIGRATION - def_bool y - depends on TRANSPARENT_HUGEPAGE - menu "Power management options" source "kernel/power/Kconfig" diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi index 242f821a90ba..dfc6376171d0 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi @@ -558,7 +558,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm0_pin>; clocks = <&cru PCLK_PWM1>; - clock-names = "pwm"; status = "disabled"; }; @@ -569,7 +568,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm1_pin>; clocks = <&cru PCLK_PWM1>; - clock-names = "pwm"; status = "disabled"; }; @@ -578,7 +576,6 @@ reg = <0x0 0xff680020 0x0 0x10>; #pwm-cells = <3>; clocks = <&cru PCLK_PWM1>; - clock-names = "pwm"; status = "disabled"; }; @@ -589,7 +586,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm3_pin>; clocks = <&cru PCLK_PWM1>; - clock-names = "pwm"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index 0f2879cc1a66..634a91af8e83 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi @@ -1182,7 +1182,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm0_pin>; clocks = <&pmucru PCLK_RKPWM_PMU>; - clock-names = "pwm"; status = "disabled"; }; @@ -1193,7 +1192,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm1_pin>; clocks = <&pmucru PCLK_RKPWM_PMU>; - clock-names = "pwm"; status = "disabled"; }; @@ -1204,7 +1202,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm2_pin>; clocks = <&pmucru PCLK_RKPWM_PMU>; - clock-names = "pwm"; status = "disabled"; }; @@ -1215,7 +1212,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm3a_pin>; clocks = <&pmucru PCLK_RKPWM_PMU>; - clock-names = "pwm"; status = "disabled"; }; diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h index 5eb7af9c4557..55f57dfa8e2f 100644 --- a/arch/arm64/include/asm/daifflags.h +++ b/arch/arm64/include/asm/daifflags.h @@ -131,6 +131,9 @@ static inline void local_daif_inherit(struct pt_regs *regs) if (interrupts_enabled(regs)) trace_hardirqs_on(); + if (system_uses_irq_prio_masking()) + gic_write_pmr(regs->pmr_save); + /* * We can't use local_daif_restore(regs->pstate) here as * system_has_prio_mask_debugging() won't restore the I bit if it can diff --git a/arch/arm64/include/asm/kernel-pgtable.h 
b/arch/arm64/include/asm/kernel-pgtable.h index 587c504a4c8b..d44df9d62fc9 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -136,7 +136,7 @@ * has a direct correspondence, and needs to appear sufficiently aligned * in the virtual address space. */ -#if defined(CONFIG_SPARSEMEM_VMEMMAP) && ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS +#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS #define ARM64_MEMSTART_ALIGN (1UL << SECTION_SIZE_BITS) #else #define ARM64_MEMSTART_ALIGN (1UL << ARM64_MEMSTART_SHIFT) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 6d9915d066fa..87b90dc27a43 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -345,7 +345,7 @@ static inline void *phys_to_virt(phys_addr_t x) */ #define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) -#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL) +#if defined(CONFIG_DEBUG_VIRTUAL) #define page_to_virt(x) ({ \ __typeof__(x) __page = x; \ void *__addr = __va(page_to_phys(__page)); \ @@ -365,7 +365,7 @@ static inline void *phys_to_virt(phys_addr_t x) u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page)); \ (struct page *)__addr; \ }) -#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */ +#endif /* CONFIG_DEBUG_VIRTUAL */ #define virt_addr_valid(addr) ({ \ __typeof__(addr) __addr = __tag_reset(addr); \ diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h index eb4a75d720ed..4b73463423c3 100644 --- a/arch/arm64/include/asm/sparsemem.h +++ b/arch/arm64/include/asm/sparsemem.h @@ -5,7 +5,6 @@ #ifndef __ASM_SPARSEMEM_H #define __ASM_SPARSEMEM_H -#ifdef CONFIG_SPARSEMEM #define MAX_PHYSMEM_BITS CONFIG_ARM64_PA_BITS /* @@ -27,6 +26,4 @@ #define SECTION_SIZE_BITS 27 #endif /* CONFIG_ARM64_64K_PAGES */ -#endif /* CONFIG_SPARSEMEM*/ - #endif diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index abc84636af07..c906d20c7b52 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -133,11 +133,10 @@ static void clean_dcache_range_nopatch(u64 start, u64 end) } while (cur += d_size, cur < end); } -static void __nocfi __apply_alternatives(void *alt_region, bool is_module, - unsigned long *feature_mask) +static void __nocfi __apply_alternatives(struct alt_region *region, bool is_module, + unsigned long *feature_mask) { struct alt_instr *alt; - struct alt_region *region = alt_region; __le32 *origptr, *updptr; alternative_cb_t alt_cb; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 30c82d38c189..efed2830d141 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -68,6 +68,7 @@ #include <linux/sort.h> #include <linux/stop_machine.h> #include <linux/types.h> +#include <linux/minmax.h> #include <linux/mm.h> #include <linux/cpu.h> #include <linux/kasan.h> @@ -694,14 +695,14 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, ret = ftrp->safe_val; break; case FTR_LOWER_SAFE: - ret = new < cur ? new : cur; + ret = min(new, cur); break; case FTR_HIGHER_OR_ZERO_SAFE: if (!cur || !new) break; fallthrough; case FTR_HIGHER_SAFE: - ret = new > cur ? 
new : cur; + ret = max(new, cur); break; default: BUG(); diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index b512b5503f6e..03991eeff643 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -29,7 +29,7 @@ int arm_cpuidle_init(unsigned int cpu) /** * arm_cpuidle_suspend() - function to enter a low-power idle state - * @arg: argument to pass to CPU suspend operations + * @index: argument to pass to CPU suspend operations * * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU * operations back-end error code otherwise. diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index a1ec351c36bd..340d04e13617 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -230,14 +230,6 @@ static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr) { unsigned long far = read_sysreg(far_el1); - /* - * The CPU masked interrupts, and we are leaving them masked during - * do_debug_exception(). Update PMR as if we had called - * local_daif_mask(). - */ - if (system_uses_irq_prio_masking()) - gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); - arm64_enter_el1_dbg(regs); if (!cortex_a76_erratum_1463225_debug_handler(regs)) do_debug_exception(far, esr, regs); @@ -404,9 +396,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr) /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */ unsigned long far = read_sysreg(far_el1); - if (system_uses_irq_prio_masking()) - gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); - enter_from_user_mode(); do_debug_exception(far, esr, regs); local_daif_restore(DAIF_PROCCTX_NOIRQ); @@ -414,9 +403,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr) static void noinstr el0_svc(struct pt_regs *regs) { - if (system_uses_irq_prio_masking()) - gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); - enter_from_user_mode(); cortex_a76_erratum_1463225_svc_handler(); do_el0_svc(regs); @@ -492,9 +478,6 @@ static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr) static void noinstr el0_svc_compat(struct pt_regs *regs) { - if (system_uses_irq_prio_masking()) - gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); - enter_from_user_mode(); cortex_a76_erratum_1463225_svc_handler(); do_el0_svc_compat(regs); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 4ac5455c0ead..3513984a88bd 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -285,16 +285,16 @@ alternative_else_nop_endif stp lr, x21, [sp, #S_LR] /* - * For exceptions from EL0, terminate the callchain here. + * For exceptions from EL0, create a terminal frame record. * For exceptions from EL1, create a synthetic frame record so the * interrupted code shows up in the backtrace. 
*/ .if \el == 0 - mov x29, xzr + stp xzr, xzr, [sp, #S_STACKFRAME] .else stp x29, x22, [sp, #S_STACKFRAME] - add x29, sp, #S_STACKFRAME .endif + add x29, sp, #S_STACKFRAME #ifdef CONFIG_ARM64_SW_TTBR0_PAN alternative_if_not ARM64_HAS_PAN @@ -314,6 +314,8 @@ alternative_else_nop_endif alternative_if ARM64_HAS_IRQ_PRIO_MASKING mrs_s x20, SYS_ICC_PMR_EL1 str x20, [sp, #S_PMR_SAVE] + mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET + msr_s SYS_ICC_PMR_EL1, x20 alternative_else_nop_endif /* Re-enable tag checking (TCO set on exception entry) */ @@ -550,17 +552,7 @@ tsk .req x28 // current thread_info #endif .endm - .macro gic_prio_irq_setup, pmr:req, tmp:req -#ifdef CONFIG_ARM64_PSEUDO_NMI - alternative_if ARM64_HAS_IRQ_PRIO_MASKING - orr \tmp, \pmr, #GIC_PRIO_PSR_I_SET - msr_s SYS_ICC_PMR_EL1, \tmp - alternative_else_nop_endif -#endif - .endm - .macro el1_interrupt_handler, handler:req - gic_prio_irq_setup pmr=x20, tmp=x1 enable_da mov x0, sp @@ -590,7 +582,6 @@ alternative_else_nop_endif .endm .macro el0_interrupt_handler, handler:req - gic_prio_irq_setup pmr=x20, tmp=x0 user_exit_irqoff enable_da @@ -788,7 +779,6 @@ SYM_CODE_END(el0_fiq) SYM_CODE_START_LOCAL(el1_error) kernel_entry 1 mrs x1, esr_el1 - gic_prio_kentry_setup tmp=x2 enable_dbg mov x0, sp bl do_serror @@ -799,7 +789,6 @@ SYM_CODE_START_LOCAL(el0_error) kernel_entry 0 el0_error_naked: mrs x25, esr_el1 - gic_prio_kentry_setup tmp=x2 user_exit_irqoff enable_dbg mov x0, sp diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index cbf52109583b..b4bb67f17a2c 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -294,13 +294,10 @@ void __show_regs(struct pt_regs *regs) i = top_reg; while (i >= 0) { - printk("x%-2d: %016llx ", i, regs->regs[i]); - i--; + printk("x%-2d: %016llx", i, regs->regs[i]); - if (i % 2 == 0) { - pr_cont("x%-2d: %016llx ", i, regs->regs[i]); - i--; - } + while (i-- % 3) + pr_cont(" x%-2d: %016llx", i, regs->regs[i]); pr_cont("\n"); } diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 84b676bcf867..de07147a7926 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -68,10 +68,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) unsigned long fp = frame->fp; struct stack_info info; - /* Terminal record; nothing to unwind */ - if (!fp) - return -ENOENT; - if (fp & 0xf) return -EINVAL; @@ -132,6 +128,12 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) frame->pc = ptrauth_strip_insn_pac(frame->pc); + /* + * This is a terminal record, so we have finished unwinding. + */ + if (!frame->fp && !frame->pc) + return -ENOENT; + return 0; } NOKPROBE_SYMBOL(unwind_frame); diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S index 61dbb4c838ef..a5e61e09ea92 100644 --- a/arch/arm64/kernel/vdso/vdso.lds.S +++ b/arch/arm64/kernel/vdso/vdso.lds.S @@ -31,6 +31,13 @@ SECTIONS .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } + /* + * Discard .note.gnu.property sections which are unused and have + * different alignment requirement from vDSO note sections. + */ + /DISCARD/ : { + *(.note.GNU-stack .note.gnu.property) + } .note : { *(.note.*) } :text :note . 
= ALIGN(16); @@ -48,7 +55,6 @@ SECTIONS PROVIDE(end = .); /DISCARD/ : { - *(.note.GNU-stack) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) *(.eh_frame .eh_frame_hdr) diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 789ad420f16b..3dba0c4f8f42 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -10,15 +10,7 @@ include $(srctree)/lib/vdso/Makefile # Same as cc-*option, but using CC_COMPAT instead of CC ifeq ($(CONFIG_CC_IS_CLANG), y) -COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit)) -COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..) - CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%)) -CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE_COMPAT)) -CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments -ifneq ($(COMPAT_GCC_TOOLCHAIN),) -CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN) -endif CC_COMPAT ?= $(CC) CC_COMPAT += $(CC_COMPAT_CLANG_FLAGS) diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 55ecf6de9ff7..58987a98e179 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -252,7 +252,7 @@ void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, set_pte(ptep, pte); } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgdp; @@ -284,9 +284,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, */ ptep = pte_alloc_map(mm, pmdp, addr); } else if (sz == PMD_SIZE) { - if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && - pud_none(READ_ONCE(*pudp))) - ptep = huge_pmd_share(mm, addr, pudp); + if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp))) + ptep = huge_pmd_share(mm, vma, addr, pudp); else ptep = (pte_t *)pmd_alloc(mm, pudp, addr); } else if (sz == (CONT_PMD_SIZE)) { diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 0696a459ea95..16a2b2b1c54d 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -221,6 +221,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) int pfn_valid(unsigned long pfn) { phys_addr_t addr = PFN_PHYS(pfn); + struct mem_section *ms; /* * Ensure the upper PAGE_SHIFT bits are clear in the @@ -231,10 +232,6 @@ int pfn_valid(unsigned long pfn) if (PHYS_PFN(addr) != pfn) return 0; -#ifdef CONFIG_SPARSEMEM -{ - struct mem_section *ms; - if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) return 0; @@ -253,8 +250,7 @@ int pfn_valid(unsigned long pfn) */ if (!early_section(ms)) return pfn_section_valid(ms, pfn); -} -#endif + return memblock_is_map_memory(addr); } EXPORT_SYMBOL(pfn_valid); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 70fa3cdbe841..6dd9369e3ea0 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1113,7 +1113,6 @@ static void free_empty_tables(unsigned long addr, unsigned long end, } #endif -#ifdef CONFIG_SPARSEMEM_VMEMMAP #if !ARM64_SWAPPER_USES_SECTION_MAPS int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) @@ -1177,7 +1176,6 @@ void vmemmap_free(unsigned long start, unsigned long end, free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END); #endif } -#endif /* CONFIG_SPARSEMEM_VMEMMAP */ static inline pud_t *fixmap_pud(unsigned long addr) { diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c index a50e92ea1878..a1937dfff31c 
100644 --- a/arch/arm64/mm/ptdump.c +++ b/arch/arm64/mm/ptdump.c @@ -51,10 +51,8 @@ static struct addr_marker address_markers[] = { { FIXADDR_TOP, "Fixmap end" }, { PCI_IO_START, "PCI I/O start" }, { PCI_IO_END, "PCI I/O end" }, -#ifdef CONFIG_SPARSEMEM_VMEMMAP { VMEMMAP_START, "vmemmap start" }, { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" }, -#endif { -1, NULL }, }; diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild index cc24bb8e539f..904a18a818be 100644 --- a/arch/csky/include/asm/Kbuild +++ b/arch/csky/include/asm/Kbuild @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 generic-y += asm-offsets.h +generic-y += extable.h generic-y += gpio.h generic-y += kvm_para.h generic-y += qrwlock.h diff --git a/arch/csky/include/asm/asid.h b/arch/csky/include/asm/asid.h index ac08b0ffbe1f..6ff205a97a14 100644 --- a/arch/csky/include/asm/asid.h +++ b/arch/csky/include/asm/asid.h @@ -37,7 +37,7 @@ void asid_new_context(struct asid_info *info, atomic64_t *pasid, * Check the ASID is still valid for the context. If not generate a new ASID. * * @pasid: Pointer to the current ASID batch - * @cpu: current CPU ID. Must have been acquired throught get_cpu() + * @cpu: current CPU ID. Must have been acquired through get_cpu() */ static inline void asid_check_context(struct asid_info *info, atomic64_t *pasid, unsigned int cpu, diff --git a/arch/csky/include/asm/barrier.h b/arch/csky/include/asm/barrier.h index 84fc600c8b45..f4045dd53e17 100644 --- a/arch/csky/include/asm/barrier.h +++ b/arch/csky/include/asm/barrier.h @@ -64,7 +64,7 @@ /* * sync: completion barrier, all sync.xx instructions - * guarantee the last response recieved by bus transaction + * guarantee the last response received by bus transaction * made by ld/st instructions before sync.s * sync.s: inherit from sync, but also shareable to other cores * sync.i: inherit from sync, but also flush cpu pipeline diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h index 589e8321dc14..5bc1cc62b87f 100644 --- a/arch/csky/include/asm/segment.h +++ b/arch/csky/include/asm/segment.h @@ -7,11 +7,4 @@ typedef struct { unsigned long seg; } mm_segment_t; -#define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF }) - -#define USER_DS ((mm_segment_t) { PAGE_OFFSET }) -#define get_fs() (current_thread_info()->addr_limit) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - #endif /* __ASM_CSKY_SEGMENT_H */ diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h index 3dec272e1fa3..ac83823fc437 100644 --- a/arch/csky/include/asm/uaccess.h +++ b/arch/csky/include/asm/uaccess.h @@ -3,122 +3,26 @@ #ifndef __ASM_CSKY_UACCESS_H #define __ASM_CSKY_UACCESS_H -/* - * User space memory access functions - */ -#include <linux/compiler.h> -#include <linux/errno.h> -#include <linux/types.h> -#include <linux/sched.h> -#include <linux/string.h> -#include <linux/version.h> -#include <asm/segment.h> +#define user_addr_max() \ + (uaccess_kernel() ? 
KERNEL_DS.seg : get_fs().seg) -static inline int access_ok(const void *addr, unsigned long size) +static inline int __access_ok(unsigned long addr, unsigned long size) { unsigned long limit = current_thread_info()->addr_limit.seg; - return (((unsigned long)addr < limit) && - ((unsigned long)(addr + size) < limit)); + return ((addr < limit) && ((addr + size) < limit)); } - -#define __addr_ok(addr) (access_ok(addr, 0)) - -extern int __put_user_bad(void); +#define __access_ok __access_ok /* - * Tell gcc we read from memory instead of writing: this is because - * we do not write to any memory gcc knows about, so there are no - * aliasing issues. + * __put_user_fn */ +extern int __put_user_bad(void); -/* - * These are the main single-value transfer routines. They automatically - * use the right size if we just have the right pointer type. - * - * This gets kind of ugly. We want to return _two_ values in "get_user()" - * and yet we don't want to do any pointers, because that is too much - * of a performance impact. Thus we have a few rather ugly macros here, - * and hide all the ugliness from the user. - * - * The "__xxx" versions of the user access functions are versions that - * do not verify the address space, that must have been done previously - * with a separate "access_ok()" call (this is used when we do multiple - * accesses to the same area of user memory). - * - * As we use the same address space for kernel and user data on - * Ckcore, we can just do these as direct assignments. (Of course, the - * exception handling means that it's no longer "just"...) - */ - -#define put_user(x, ptr) \ - __put_user_check((x), (ptr), sizeof(*(ptr))) - -#define __put_user(x, ptr) \ - __put_user_nocheck((x), (ptr), sizeof(*(ptr))) - -#define __ptr(x) ((unsigned long *)(x)) - -#define get_user(x, ptr) \ - __get_user_check((x), (ptr), sizeof(*(ptr))) - -#define __get_user(x, ptr) \ - __get_user_nocheck((x), (ptr), sizeof(*(ptr))) - -#define __put_user_nocheck(x, ptr, size) \ -({ \ - long __pu_err = 0; \ - typeof(*(ptr)) *__pu_addr = (ptr); \ - typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \ - if (__pu_addr) \ - __put_user_size(__pu_val, (__pu_addr), (size), \ - __pu_err); \ - __pu_err; \ -}) - -#define __put_user_check(x, ptr, size) \ -({ \ - long __pu_err = -EFAULT; \ - typeof(*(ptr)) *__pu_addr = (ptr); \ - typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \ - if (access_ok(__pu_addr, size) && __pu_addr) \ - __put_user_size(__pu_val, __pu_addr, (size), __pu_err); \ - __pu_err; \ -}) - -#define __put_user_size(x, ptr, size, retval) \ -do { \ - retval = 0; \ - switch (size) { \ - case 1: \ - __put_user_asm_b(x, ptr, retval); \ - break; \ - case 2: \ - __put_user_asm_h(x, ptr, retval); \ - break; \ - case 4: \ - __put_user_asm_w(x, ptr, retval); \ - break; \ - case 8: \ - __put_user_asm_64(x, ptr, retval); \ - break; \ - default: \ - __put_user_bad(); \ - } \ -} while (0) - -/* - * We don't tell gcc that we are accessing memory, but this is OK - * because we do not write to any memory gcc knows about, so there - * are no aliasing issues. - * - * Note that PC at a fault is the address *after* the faulting - * instruction. 
- */ #define __put_user_asm_b(x, ptr, err) \ do { \ int errcode; \ - asm volatile( \ + __asm__ __volatile__( \ "1: stb %1, (%2,0) \n" \ " br 3f \n" \ "2: mov %0, %3 \n" \ @@ -136,7 +40,7 @@ do { \ #define __put_user_asm_h(x, ptr, err) \ do { \ int errcode; \ - asm volatile( \ + __asm__ __volatile__( \ "1: sth %1, (%2,0) \n" \ " br 3f \n" \ "2: mov %0, %3 \n" \ @@ -154,7 +58,7 @@ do { \ #define __put_user_asm_w(x, ptr, err) \ do { \ int errcode; \ - asm volatile( \ + __asm__ __volatile__( \ "1: stw %1, (%2,0) \n" \ " br 3f \n" \ "2: mov %0, %3 \n" \ @@ -169,241 +73,149 @@ do { \ : "memory"); \ } while (0) -#define __put_user_asm_64(x, ptr, err) \ -do { \ - int tmp; \ - int errcode; \ - typeof(*(ptr))src = (typeof(*(ptr)))x; \ - typeof(*(ptr))*psrc = &src; \ - \ - asm volatile( \ - " ldw %3, (%1, 0) \n" \ - "1: stw %3, (%2, 0) \n" \ - " ldw %3, (%1, 4) \n" \ - "2: stw %3, (%2, 4) \n" \ - " br 4f \n" \ - "3: mov %0, %4 \n" \ - " br 4f \n" \ - ".section __ex_table, \"a\" \n" \ - ".align 2 \n" \ - ".long 1b, 3b \n" \ - ".long 2b, 3b \n" \ - ".previous \n" \ - "4: \n" \ - : "=r"(err), "=r"(psrc), "=r"(ptr), \ - "=r"(tmp), "=r"(errcode) \ - : "0"(err), "1"(psrc), "2"(ptr), "3"(0), "4"(-EFAULT) \ - : "memory"); \ -} while (0) - -#define __get_user_nocheck(x, ptr, size) \ -({ \ - long __gu_err; \ - __get_user_size(x, (ptr), (size), __gu_err); \ - __gu_err; \ -}) - -#define __get_user_check(x, ptr, size) \ -({ \ - int __gu_err = -EFAULT; \ - const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ - if (access_ok(__gu_ptr, size) && __gu_ptr) \ - __get_user_size(x, __gu_ptr, size, __gu_err); \ - __gu_err; \ -}) - -#define __get_user_size(x, ptr, size, retval) \ -do { \ - switch (size) { \ - case 1: \ - __get_user_asm_common((x), ptr, "ldb", retval); \ - break; \ - case 2: \ - __get_user_asm_common((x), ptr, "ldh", retval); \ - break; \ - case 4: \ - __get_user_asm_common((x), ptr, "ldw", retval); \ - break; \ - default: \ - x = 0; \ - (retval) = __get_user_bad(); \ - } \ +#define __put_user_asm_64(x, ptr, err) \ +do { \ + int tmp; \ + int errcode; \ + \ + __asm__ __volatile__( \ + " ldw %3, (%1, 0) \n" \ + "1: stw %3, (%2, 0) \n" \ + " ldw %3, (%1, 4) \n" \ + "2: stw %3, (%2, 4) \n" \ + " br 4f \n" \ + "3: mov %0, %4 \n" \ + " br 4f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 3b \n" \ + ".long 2b, 3b \n" \ + ".previous \n" \ + "4: \n" \ + : "=r"(err), "=r"(x), "=r"(ptr), \ + "=r"(tmp), "=r"(errcode) \ + : "0"(err), "1"(x), "2"(ptr), "3"(0), \ + "4"(-EFAULT) \ + : "memory"); \ } while (0) -#define __get_user_asm_common(x, ptr, ins, err) \ -do { \ - int errcode; \ - asm volatile( \ - "1: " ins " %1, (%4,0) \n" \ - " br 3f \n" \ - /* Fix up codes */ \ - "2: mov %0, %2 \n" \ - " movi %1, 0 \n" \ - " br 3f \n" \ - ".section __ex_table,\"a\" \n" \ - ".align 2 \n" \ - ".long 1b, 2b \n" \ - ".previous \n" \ - "3: \n" \ - : "=r"(err), "=r"(x), "=r"(errcode) \ - : "0"(0), "r"(ptr), "2"(-EFAULT) \ - : "memory"); \ -} while (0) +static inline int __put_user_fn(size_t size, void __user *ptr, void *x) +{ + int retval = 0; + u32 tmp; + + switch (size) { + case 1: + tmp = *(u8 *)x; + __put_user_asm_b(tmp, ptr, retval); + break; + case 2: + tmp = *(u16 *)x; + __put_user_asm_h(tmp, ptr, retval); + break; + case 4: + tmp = *(u32 *)x; + __put_user_asm_w(tmp, ptr, retval); + break; + case 8: + __put_user_asm_64(x, (u64 *)ptr, retval); + break; + } + + return retval; +} +#define __put_user_fn __put_user_fn +/* + * __get_user_fn + */ extern int __get_user_bad(void); -#define ___copy_to_user(to, 
from, n) \ +#define __get_user_asm_common(x, ptr, ins, err) \ do { \ - int w0, w1, w2, w3; \ - asm volatile( \ - "0: cmpnei %1, 0 \n" \ - " bf 8f \n" \ - " mov %3, %1 \n" \ - " or %3, %2 \n" \ - " andi %3, 3 \n" \ - " cmpnei %3, 0 \n" \ - " bf 1f \n" \ - " br 5f \n" \ - "1: cmplti %0, 16 \n" /* 4W */ \ - " bt 3f \n" \ - " ldw %3, (%2, 0) \n" \ - " ldw %4, (%2, 4) \n" \ - " ldw %5, (%2, 8) \n" \ - " ldw %6, (%2, 12) \n" \ - "2: stw %3, (%1, 0) \n" \ - "9: stw %4, (%1, 4) \n" \ - "10: stw %5, (%1, 8) \n" \ - "11: stw %6, (%1, 12) \n" \ - " addi %2, 16 \n" \ - " addi %1, 16 \n" \ - " subi %0, 16 \n" \ - " br 1b \n" \ - "3: cmplti %0, 4 \n" /* 1W */ \ - " bt 5f \n" \ - " ldw %3, (%2, 0) \n" \ - "4: stw %3, (%1, 0) \n" \ - " addi %2, 4 \n" \ - " addi %1, 4 \n" \ - " subi %0, 4 \n" \ - " br 3b \n" \ - "5: cmpnei %0, 0 \n" /* 1B */ \ - " bf 13f \n" \ - " ldb %3, (%2, 0) \n" \ - "6: stb %3, (%1, 0) \n" \ - " addi %2, 1 \n" \ - " addi %1, 1 \n" \ - " subi %0, 1 \n" \ - " br 5b \n" \ - "7: subi %0, 4 \n" \ - "8: subi %0, 4 \n" \ - "12: subi %0, 4 \n" \ - " br 13f \n" \ - ".section __ex_table, \"a\" \n" \ - ".align 2 \n" \ - ".long 2b, 13f \n" \ - ".long 4b, 13f \n" \ - ".long 6b, 13f \n" \ - ".long 9b, 12b \n" \ - ".long 10b, 8b \n" \ - ".long 11b, 7b \n" \ - ".previous \n" \ - "13: \n" \ - : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), \ - "=r"(w1), "=r"(w2), "=r"(w3) \ - : "0"(n), "1"(to), "2"(from) \ + int errcode; \ + __asm__ __volatile__( \ + "1: " ins " %1, (%4, 0) \n" \ + " br 3f \n" \ + "2: mov %0, %2 \n" \ + " movi %1, 0 \n" \ + " br 3f \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 2b \n" \ + ".previous \n" \ + "3: \n" \ + : "=r"(err), "=r"(x), "=r"(errcode) \ + : "0"(0), "r"(ptr), "2"(-EFAULT) \ : "memory"); \ } while (0) -#define ___copy_from_user(to, from, n) \ +#define __get_user_asm_64(x, ptr, err) \ do { \ int tmp; \ - int nsave; \ - asm volatile( \ - "0: cmpnei %1, 0 \n" \ - " bf 7f \n" \ - " mov %3, %1 \n" \ - " or %3, %2 \n" \ - " andi %3, 3 \n" \ - " cmpnei %3, 0 \n" \ - " bf 1f \n" \ - " br 5f \n" \ - "1: cmplti %0, 16 \n" \ - " bt 3f \n" \ - "2: ldw %3, (%2, 0) \n" \ - "10: ldw %4, (%2, 4) \n" \ - " stw %3, (%1, 0) \n" \ - " stw %4, (%1, 4) \n" \ - "11: ldw %3, (%2, 8) \n" \ - "12: ldw %4, (%2, 12) \n" \ - " stw %3, (%1, 8) \n" \ - " stw %4, (%1, 12) \n" \ - " addi %2, 16 \n" \ - " addi %1, 16 \n" \ - " subi %0, 16 \n" \ - " br 1b \n" \ - "3: cmplti %0, 4 \n" \ - " bt 5f \n" \ - "4: ldw %3, (%2, 0) \n" \ - " stw %3, (%1, 0) \n" \ - " addi %2, 4 \n" \ - " addi %1, 4 \n" \ - " subi %0, 4 \n" \ - " br 3b \n" \ - "5: cmpnei %0, 0 \n" \ - " bf 7f \n" \ - "6: ldb %3, (%2, 0) \n" \ - " stb %3, (%1, 0) \n" \ - " addi %2, 1 \n" \ - " addi %1, 1 \n" \ - " subi %0, 1 \n" \ - " br 5b \n" \ - "8: stw %3, (%1, 0) \n" \ - " subi %0, 4 \n" \ - " bf 7f \n" \ - "9: subi %0, 8 \n" \ - " bf 7f \n" \ - "13: stw %3, (%1, 8) \n" \ - " subi %0, 12 \n" \ - " bf 7f \n" \ - ".section __ex_table, \"a\" \n" \ - ".align 2 \n" \ - ".long 2b, 7f \n" \ - ".long 4b, 7f \n" \ - ".long 6b, 7f \n" \ - ".long 10b, 8b \n" \ - ".long 11b, 9b \n" \ - ".long 12b,13b \n" \ - ".previous \n" \ - "7: \n" \ - : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), \ - "=r"(tmp) \ - : "0"(n), "1"(to), "2"(from) \ + int errcode; \ + \ + __asm__ __volatile__( \ + "1: ldw %3, (%2, 0) \n" \ + " stw %3, (%1, 0) \n" \ + "2: ldw %3, (%2, 4) \n" \ + " stw %3, (%1, 4) \n" \ + " br 4f \n" \ + "3: mov %0, %4 \n" \ + " br 4f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 3b \n" \ + ".long 2b, 3b \n" 
\ + ".previous \n" \ + "4: \n" \ + : "=r"(err), "=r"(x), "=r"(ptr), \ + "=r"(tmp), "=r"(errcode) \ + : "0"(err), "1"(x), "2"(ptr), "3"(0), \ + "4"(-EFAULT) \ : "memory"); \ } while (0) +static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) +{ + int retval; + u32 tmp; + + switch (size) { + case 1: + __get_user_asm_common(tmp, ptr, "ldb", retval); + *(u8 *)x = (u8)tmp; + break; + case 2: + __get_user_asm_common(tmp, ptr, "ldh", retval); + *(u16 *)x = (u16)tmp; + break; + case 4: + __get_user_asm_common(tmp, ptr, "ldw", retval); + *(u32 *)x = (u32)tmp; + break; + case 8: + __get_user_asm_64(x, ptr, retval); + break; + } + + return retval; +} +#define __get_user_fn __get_user_fn + unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n); unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n); -unsigned long clear_user(void *to, unsigned long n); unsigned long __clear_user(void __user *to, unsigned long n); +#define __clear_user __clear_user -long strncpy_from_user(char *dst, const char *src, long count); long __strncpy_from_user(char *dst, const char *src, long count); +#define __strncpy_from_user __strncpy_from_user -/* - * Return the size of a string (including the ending 0) - * - * Return 0 on exception, a value greater than N if too long - */ -long strnlen_user(const char *src, long n); - -#define strlen_user(str) strnlen_user(str, 32767) - -struct exception_table_entry { - unsigned long insn; - unsigned long nextinsn; -}; +long __strnlen_user(const char *s, long n); +#define __strnlen_user __strnlen_user -extern int fixup_exception(struct pt_regs *regs); +#include <asm/segment.h> +#include <asm-generic/uaccess.h> #endif /* __ASM_CSKY_UACCESS_H */ diff --git a/arch/csky/include/asm/vdso.h b/arch/csky/include/asm/vdso.h index eb5142f9c564..bdce581b5fcb 100644 --- a/arch/csky/include/asm/vdso.h +++ b/arch/csky/include/asm/vdso.h @@ -16,7 +16,7 @@ struct vdso_data { * offset of 0, but since the linker must support setting weak undefined * symbols to the absolute address 0 it also happens to support other low * addresses even when the code model suggests those low addresses would not - * otherwise be availiable. + * otherwise be available. 
*/ #define VDSO_SYMBOL(base, name) \ ({ \ diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S index c1bd7a6b4ab6..00e3c8ebf9b8 100644 --- a/arch/csky/kernel/entry.S +++ b/arch/csky/kernel/entry.S @@ -9,7 +9,6 @@ #include <asm/unistd.h> #include <asm/asm-offsets.h> #include <linux/threads.h> -#include <asm/setup.h> #include <asm/page.h> #include <asm/thread_info.h> diff --git a/arch/csky/lib/usercopy.c b/arch/csky/lib/usercopy.c index 3c9bd645e643..c5d394a0ae78 100644 --- a/arch/csky/lib/usercopy.c +++ b/arch/csky/lib/usercopy.c @@ -7,7 +7,70 @@ unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n) { - ___copy_from_user(to, from, n); + int tmp, nsave; + + __asm__ __volatile__( + "0: cmpnei %1, 0 \n" + " bf 7f \n" + " mov %3, %1 \n" + " or %3, %2 \n" + " andi %3, 3 \n" + " cmpnei %3, 0 \n" + " bf 1f \n" + " br 5f \n" + "1: cmplti %0, 16 \n" + " bt 3f \n" + "2: ldw %3, (%2, 0) \n" + "10: ldw %4, (%2, 4) \n" + " stw %3, (%1, 0) \n" + " stw %4, (%1, 4) \n" + "11: ldw %3, (%2, 8) \n" + "12: ldw %4, (%2, 12) \n" + " stw %3, (%1, 8) \n" + " stw %4, (%1, 12) \n" + " addi %2, 16 \n" + " addi %1, 16 \n" + " subi %0, 16 \n" + " br 1b \n" + "3: cmplti %0, 4 \n" + " bt 5f \n" + "4: ldw %3, (%2, 0) \n" + " stw %3, (%1, 0) \n" + " addi %2, 4 \n" + " addi %1, 4 \n" + " subi %0, 4 \n" + " br 3b \n" + "5: cmpnei %0, 0 \n" + " bf 7f \n" + "6: ldb %3, (%2, 0) \n" + " stb %3, (%1, 0) \n" + " addi %2, 1 \n" + " addi %1, 1 \n" + " subi %0, 1 \n" + " br 5b \n" + "8: stw %3, (%1, 0) \n" + " subi %0, 4 \n" + " bf 7f \n" + "9: subi %0, 8 \n" + " bf 7f \n" + "13: stw %3, (%1, 8) \n" + " subi %0, 12 \n" + " bf 7f \n" + ".section __ex_table, \"a\" \n" + ".align 2 \n" + ".long 2b, 7f \n" + ".long 4b, 7f \n" + ".long 6b, 7f \n" + ".long 10b, 8b \n" + ".long 11b, 9b \n" + ".long 12b,13b \n" + ".previous \n" + "7: \n" + : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), + "=r"(tmp) + : "0"(n), "1"(to), "2"(from) + : "memory"); + return n; } EXPORT_SYMBOL(raw_copy_from_user); @@ -15,48 +78,70 @@ EXPORT_SYMBOL(raw_copy_from_user); unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n) { - ___copy_to_user(to, from, n); + int w0, w1, w2, w3; + + __asm__ __volatile__( + "0: cmpnei %1, 0 \n" + " bf 8f \n" + " mov %3, %1 \n" + " or %3, %2 \n" + " andi %3, 3 \n" + " cmpnei %3, 0 \n" + " bf 1f \n" + " br 5f \n" + "1: cmplti %0, 16 \n" /* 4W */ + " bt 3f \n" + " ldw %3, (%2, 0) \n" + " ldw %4, (%2, 4) \n" + " ldw %5, (%2, 8) \n" + " ldw %6, (%2, 12) \n" + "2: stw %3, (%1, 0) \n" + "9: stw %4, (%1, 4) \n" + "10: stw %5, (%1, 8) \n" + "11: stw %6, (%1, 12) \n" + " addi %2, 16 \n" + " addi %1, 16 \n" + " subi %0, 16 \n" + " br 1b \n" + "3: cmplti %0, 4 \n" /* 1W */ + " bt 5f \n" + " ldw %3, (%2, 0) \n" + "4: stw %3, (%1, 0) \n" + " addi %2, 4 \n" + " addi %1, 4 \n" + " subi %0, 4 \n" + " br 3b \n" + "5: cmpnei %0, 0 \n" /* 1B */ + " bf 13f \n" + " ldb %3, (%2, 0) \n" + "6: stb %3, (%1, 0) \n" + " addi %2, 1 \n" + " addi %1, 1 \n" + " subi %0, 1 \n" + " br 5b \n" + "7: subi %0, 4 \n" + "8: subi %0, 4 \n" + "12: subi %0, 4 \n" + " br 13f \n" + ".section __ex_table, \"a\" \n" + ".align 2 \n" + ".long 2b, 13f \n" + ".long 4b, 13f \n" + ".long 6b, 13f \n" + ".long 9b, 12b \n" + ".long 10b, 8b \n" + ".long 11b, 7b \n" + ".previous \n" + "13: \n" + : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), + "=r"(w1), "=r"(w2), "=r"(w3) + : "0"(n), "1"(to), "2"(from) + : "memory"); + return n; } EXPORT_SYMBOL(raw_copy_to_user); - -/* - * copy a null terminated string from userspace. 
- */ -#define __do_strncpy_from_user(dst, src, count, res) \ -do { \ - int tmp; \ - long faultres; \ - asm volatile( \ - " cmpnei %3, 0 \n" \ - " bf 4f \n" \ - "1: cmpnei %1, 0 \n" \ - " bf 5f \n" \ - "2: ldb %4, (%3, 0) \n" \ - " stb %4, (%2, 0) \n" \ - " cmpnei %4, 0 \n" \ - " bf 3f \n" \ - " addi %3, 1 \n" \ - " addi %2, 1 \n" \ - " subi %1, 1 \n" \ - " br 1b \n" \ - "3: subu %0, %1 \n" \ - " br 5f \n" \ - "4: mov %0, %5 \n" \ - " br 5f \n" \ - ".section __ex_table, \"a\" \n" \ - ".align 2 \n" \ - ".long 2b, 4b \n" \ - ".previous \n" \ - "5: \n" \ - : "=r"(res), "=r"(count), "=r"(dst), \ - "=r"(src), "=r"(tmp), "=r"(faultres) \ - : "5"(-EFAULT), "0"(count), "1"(count), \ - "2"(dst), "3"(src) \ - : "memory", "cc"); \ -} while (0) - /* * __strncpy_from_user: - Copy a NUL terminated string from userspace, * with less checking. @@ -80,43 +165,43 @@ do { \ */ long __strncpy_from_user(char *dst, const char *src, long count) { - long res; + long res, faultres; + int tmp; - __do_strncpy_from_user(dst, src, count, res); - return res; -} -EXPORT_SYMBOL(__strncpy_from_user); - -/* - * strncpy_from_user: - Copy a NUL terminated string from userspace. - * @dst: Destination address, in kernel space. This buffer must be at - * least @count bytes long. - * @src: Source address, in user space. - * @count: Maximum number of bytes to copy, including the trailing NUL. - * - * Copies a NUL-terminated string from userspace to kernel space. - * - * On success, returns the length of the string (not including the trailing - * NUL). - * - * If access to userspace fails, returns -EFAULT (some data may have been - * copied). - * - * If @count is smaller than the length of the string, copies @count bytes - * and returns @count. - */ -long strncpy_from_user(char *dst, const char *src, long count) -{ - long res = -EFAULT; + __asm__ __volatile__( + " cmpnei %3, 0 \n" + " bf 4f \n" + "1: cmpnei %1, 0 \n" + " bf 5f \n" + "2: ldb %4, (%3, 0) \n" + " stb %4, (%2, 0) \n" + " cmpnei %4, 0 \n" + " bf 3f \n" + " addi %3, 1 \n" + " addi %2, 1 \n" + " subi %1, 1 \n" + " br 1b \n" + "3: subu %0, %1 \n" + " br 5f \n" + "4: mov %0, %5 \n" + " br 5f \n" + ".section __ex_table, \"a\" \n" + ".align 2 \n" + ".long 2b, 4b \n" + ".previous \n" + "5: \n" + : "=r"(res), "=r"(count), "=r"(dst), + "=r"(src), "=r"(tmp), "=r"(faultres) + : "5"(-EFAULT), "0"(count), "1"(count), + "2"(dst), "3"(src) + : "memory"); - if (access_ok(src, 1)) - __do_strncpy_from_user(dst, src, count, res); return res; } -EXPORT_SYMBOL(strncpy_from_user); +EXPORT_SYMBOL(__strncpy_from_user); /* - * strlen_user: - Get the size of a string in user space. + * strnlen_user: - Get the size of a string in user space. * @str: The string to measure. * @n: The maximum valid length * @@ -126,14 +211,11 @@ EXPORT_SYMBOL(strncpy_from_user); * On exception, returns 0. * If the string is too long, returns a value greater than @n. 
*/ -long strnlen_user(const char *s, long n) +long __strnlen_user(const char *s, long n) { unsigned long res, tmp; - if (s == NULL) - return 0; - - asm volatile( + __asm__ __volatile__( " cmpnei %1, 0 \n" " bf 3f \n" "1: cmpnei %0, 0 \n" @@ -156,87 +238,11 @@ long strnlen_user(const char *s, long n) "5: \n" : "=r"(n), "=r"(s), "=r"(res), "=r"(tmp) : "0"(n), "1"(s), "2"(n) - : "memory", "cc"); + : "memory"); return res; } -EXPORT_SYMBOL(strnlen_user); - -#define __do_clear_user(addr, size) \ -do { \ - int __d0, zvalue, tmp; \ - \ - asm volatile( \ - "0: cmpnei %1, 0 \n" \ - " bf 7f \n" \ - " mov %3, %1 \n" \ - " andi %3, 3 \n" \ - " cmpnei %3, 0 \n" \ - " bf 1f \n" \ - " br 5f \n" \ - "1: cmplti %0, 32 \n" /* 4W */ \ - " bt 3f \n" \ - "8: stw %2, (%1, 0) \n" \ - "10: stw %2, (%1, 4) \n" \ - "11: stw %2, (%1, 8) \n" \ - "12: stw %2, (%1, 12) \n" \ - "13: stw %2, (%1, 16) \n" \ - "14: stw %2, (%1, 20) \n" \ - "15: stw %2, (%1, 24) \n" \ - "16: stw %2, (%1, 28) \n" \ - " addi %1, 32 \n" \ - " subi %0, 32 \n" \ - " br 1b \n" \ - "3: cmplti %0, 4 \n" /* 1W */ \ - " bt 5f \n" \ - "4: stw %2, (%1, 0) \n" \ - " addi %1, 4 \n" \ - " subi %0, 4 \n" \ - " br 3b \n" \ - "5: cmpnei %0, 0 \n" /* 1B */ \ - "9: bf 7f \n" \ - "6: stb %2, (%1, 0) \n" \ - " addi %1, 1 \n" \ - " subi %0, 1 \n" \ - " br 5b \n" \ - ".section __ex_table,\"a\" \n" \ - ".align 2 \n" \ - ".long 8b, 9b \n" \ - ".long 10b, 9b \n" \ - ".long 11b, 9b \n" \ - ".long 12b, 9b \n" \ - ".long 13b, 9b \n" \ - ".long 14b, 9b \n" \ - ".long 15b, 9b \n" \ - ".long 16b, 9b \n" \ - ".long 4b, 9b \n" \ - ".long 6b, 9b \n" \ - ".previous \n" \ - "7: \n" \ - : "=r"(size), "=r" (__d0), \ - "=r"(zvalue), "=r"(tmp) \ - : "0"(size), "1"(addr), "2"(0) \ - : "memory", "cc"); \ -} while (0) - -/* - * clear_user: - Zero a block of memory in user space. - * @to: Destination address, in user space. - * @n: Number of bytes to zero. - * - * Zero a block of memory in user space. - * - * Returns number of bytes that could not be cleared. - * On success, this will be zero. - */ -unsigned long -clear_user(void __user *to, unsigned long n) -{ - if (access_ok(to, n)) - __do_clear_user(to, n); - return n; -} -EXPORT_SYMBOL(clear_user); +EXPORT_SYMBOL(__strnlen_user); /* * __clear_user: - Zero a block of memory in user space, with less checking. 
@@ -252,7 +258,59 @@ EXPORT_SYMBOL(clear_user); unsigned long __clear_user(void __user *to, unsigned long n) { - __do_clear_user(to, n); + int data, value, tmp; + + __asm__ __volatile__( + "0: cmpnei %1, 0 \n" + " bf 7f \n" + " mov %3, %1 \n" + " andi %3, 3 \n" + " cmpnei %3, 0 \n" + " bf 1f \n" + " br 5f \n" + "1: cmplti %0, 32 \n" /* 4W */ + " bt 3f \n" + "8: stw %2, (%1, 0) \n" + "10: stw %2, (%1, 4) \n" + "11: stw %2, (%1, 8) \n" + "12: stw %2, (%1, 12) \n" + "13: stw %2, (%1, 16) \n" + "14: stw %2, (%1, 20) \n" + "15: stw %2, (%1, 24) \n" + "16: stw %2, (%1, 28) \n" + " addi %1, 32 \n" + " subi %0, 32 \n" + " br 1b \n" + "3: cmplti %0, 4 \n" /* 1W */ + " bt 5f \n" + "4: stw %2, (%1, 0) \n" + " addi %1, 4 \n" + " subi %0, 4 \n" + " br 3b \n" + "5: cmpnei %0, 0 \n" /* 1B */ + "9: bf 7f \n" + "6: stb %2, (%1, 0) \n" + " addi %1, 1 \n" + " subi %0, 1 \n" + " br 5b \n" + ".section __ex_table,\"a\" \n" + ".align 2 \n" + ".long 8b, 9b \n" + ".long 10b, 9b \n" + ".long 11b, 9b \n" + ".long 12b, 9b \n" + ".long 13b, 9b \n" + ".long 14b, 9b \n" + ".long 15b, 9b \n" + ".long 16b, 9b \n" + ".long 4b, 9b \n" + ".long 6b, 9b \n" + ".previous \n" + "7: \n" + : "=r"(n), "=r" (data), "=r"(value), "=r"(tmp) + : "0"(n), "1"(to), "2"(0) + : "memory"); + return n; } EXPORT_SYMBOL(__clear_user); diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c index 1482de56f4f7..466ad949818a 100644 --- a/arch/csky/mm/fault.c +++ b/arch/csky/mm/fault.c @@ -12,7 +12,7 @@ int fixup_exception(struct pt_regs *regs) fixup = search_exception_tables(instruction_pointer(regs)); if (fixup) { - regs->pc = fixup->nextinsn; + regs->pc = fixup->fixup; return 1; } diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c index ffade2f9a4c8..4e51d63850c4 100644 --- a/arch/csky/mm/syscache.c +++ b/arch/csky/mm/syscache.c @@ -17,6 +17,7 @@ SYSCALL_DEFINE3(cacheflush, flush_icache_mm_range(current->mm, (unsigned long)addr, (unsigned long)addr + bytes); + fallthrough; case DCACHE: dcache_wb_range((unsigned long)addr, (unsigned long)addr + bytes); diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h index 7aa16c732aa9..c867a80cab5b 100644 --- a/arch/h8300/include/asm/bitops.h +++ b/arch/h8300/include/asm/bitops.h @@ -9,6 +9,10 @@ #include <linux/compiler.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/__fls.h> +#include <asm-generic/bitops/fls64.h> + #ifdef __KERNEL__ #ifndef _LINUX_BITOPS_H @@ -173,8 +177,4 @@ static inline unsigned long __ffs(unsigned long word) #endif /* __KERNEL__ */ -#include <asm-generic/bitops/fls.h> -#include <asm-generic/bitops/__fls.h> -#include <asm-generic/bitops/fls64.h> - #endif /* _H8300_BITOPS_H */ diff --git a/arch/hexagon/Makefile b/arch/hexagon/Makefile index c168c6980d05..74b644ea8a00 100644 --- a/arch/hexagon/Makefile +++ b/arch/hexagon/Makefile @@ -10,6 +10,9 @@ LDFLAGS_vmlinux += -G0 # Do not use single-byte enums; these will overflow. KBUILD_CFLAGS += -fno-short-enums +# We must use long-calls: +KBUILD_CFLAGS += -mlong-calls + # Modules must use either long-calls, or use pic/plt. # Use long-calls for now, it's easier. And faster. 
# KBUILD_CFLAGS_MODULE += -fPIC @@ -30,9 +33,6 @@ TIR_NAME := r19 KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__ KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME) -LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name 2>/dev/null) -libs-y += $(LIBGCC) - head-y := arch/hexagon/kernel/head.o core-y += arch/hexagon/kernel/ \ diff --git a/arch/hexagon/configs/comet_defconfig b/arch/hexagon/configs/comet_defconfig index f19ae2ab0aaa..9b2b1cc0794a 100644 --- a/arch/hexagon/configs/comet_defconfig +++ b/arch/hexagon/configs/comet_defconfig @@ -34,7 +34,6 @@ CONFIG_NET_ETHERNET=y # CONFIG_SERIO is not set # CONFIG_CONSOLE_TRANSLATIONS is not set CONFIG_LEGACY_PTY_COUNT=64 -# CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set CONFIG_SPI=y CONFIG_SPI_DEBUG=y @@ -81,4 +80,3 @@ CONFIG_FRAME_WARN=0 CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set -CONFIG_DEBUG_INFO=y diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h index 6b9c554aee78..9fb00a0ae89f 100644 --- a/arch/hexagon/include/asm/futex.h +++ b/arch/hexagon/include/asm/futex.h @@ -21,7 +21,7 @@ "3:\n" \ ".section .fixup,\"ax\"\n" \ "4: %1 = #%5;\n" \ - " jump 3b\n" \ + " jump ##3b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ ".long 1b,4b,2b,4b\n" \ @@ -90,7 +90,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, "3:\n" ".section .fixup,\"ax\"\n" "4: %0 = #%6\n" - " jump 3b\n" + " jump ##3b\n" ".previous\n" ".section __ex_table,\"a\"\n" ".long 1b,4b,2b,4b\n" diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h index bda2a9c2df78..c33241425a5c 100644 --- a/arch/hexagon/include/asm/io.h +++ b/arch/hexagon/include/asm/io.h @@ -64,7 +64,6 @@ static inline void *phys_to_virt(unsigned long address) * convert a physical pointer to a virtual kernel pointer for * /dev/mem access. */ -#define xlate_dev_kmem_ptr(p) __va(p) #define xlate_dev_mem_ptr(p) __va(p) /* diff --git a/arch/hexagon/include/asm/timex.h b/arch/hexagon/include/asm/timex.h index 78338d8ada83..8d4ec76fceb4 100644 --- a/arch/hexagon/include/asm/timex.h +++ b/arch/hexagon/include/asm/timex.h @@ -8,6 +8,7 @@ #include <asm-generic/timex.h> #include <asm/timer-regs.h> +#include <asm/hexagon_vm.h> /* Using TCX0 as our clock. CLOCK_TICK_RATE scheduled to be removed. 
*/ #define CLOCK_TICK_RATE TCX0_CLK_RATE @@ -16,7 +17,7 @@ static inline int read_current_timer(unsigned long *timer_val) { - *timer_val = (unsigned long) __vmgettime(); + *timer_val = __vmgettime(); return 0; } diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c index 6fb1aaab1c29..35545a7386a0 100644 --- a/arch/hexagon/kernel/hexagon_ksyms.c +++ b/arch/hexagon/kernel/hexagon_ksyms.c @@ -35,8 +35,8 @@ EXPORT_SYMBOL(_dflt_cache_att); DECLARE_EXPORT(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes); /* Additional functions */ -DECLARE_EXPORT(__divsi3); -DECLARE_EXPORT(__modsi3); -DECLARE_EXPORT(__udivsi3); -DECLARE_EXPORT(__umodsi3); +DECLARE_EXPORT(__hexagon_divsi3); +DECLARE_EXPORT(__hexagon_modsi3); +DECLARE_EXPORT(__hexagon_udivsi3); +DECLARE_EXPORT(__hexagon_umodsi3); DECLARE_EXPORT(csum_tcpudp_magic); diff --git a/arch/hexagon/kernel/ptrace.c b/arch/hexagon/kernel/ptrace.c index a5a89e944257..8975f9b4cedf 100644 --- a/arch/hexagon/kernel/ptrace.c +++ b/arch/hexagon/kernel/ptrace.c @@ -35,7 +35,7 @@ void user_disable_single_step(struct task_struct *child) static int genregs_get(struct task_struct *target, const struct user_regset *regset, - srtuct membuf to) + struct membuf to) { struct pt_regs *regs = task_pt_regs(target); @@ -54,7 +54,7 @@ static int genregs_get(struct task_struct *target, membuf_store(&to, regs->m0); membuf_store(&to, regs->m1); membuf_store(&to, regs->usr); - membuf_store(&to, regs->p3_0); + membuf_store(&to, regs->preds); membuf_store(&to, regs->gp); membuf_store(&to, regs->ugp); membuf_store(&to, pt_elr(regs)); // pc diff --git a/arch/hexagon/lib/Makefile b/arch/hexagon/lib/Makefile index 54be529d17a2..a64641e89d5f 100644 --- a/arch/hexagon/lib/Makefile +++ b/arch/hexagon/lib/Makefile @@ -2,4 +2,5 @@ # # Makefile for hexagon-specific library files. # -obj-y = checksum.o io.o memcpy.o memset.o +obj-y = checksum.o io.o memcpy.o memset.o memcpy_likely_aligned.o \ + divsi3.o modsi3.o udivsi3.o umodsi3.o diff --git a/arch/hexagon/lib/divsi3.S b/arch/hexagon/lib/divsi3.S new file mode 100644 index 000000000000..783e09424c2c --- /dev/null +++ b/arch/hexagon/lib/divsi3.S @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/linkage.h> + +SYM_FUNC_START(__hexagon_divsi3) + { + p0 = cmp.gt(r0,#-1) + p1 = cmp.gt(r1,#-1) + r3:2 = vabsw(r1:0) + } + { + p3 = xor(p0,p1) + r4 = sub(r2,r3) + r6 = cl0(r2) + p0 = cmp.gtu(r3,r2) + } + { + r0 = mux(p3,#-1,#1) + r7 = cl0(r3) + p1 = cmp.gtu(r3,r4) + } + { + r0 = mux(p0,#0,r0) + p0 = or(p0,p1) + if (p0.new) jumpr:nt r31 + r6 = sub(r7,r6) + } + { + r7 = r6 + r5:4 = combine(#1,r3) + r6 = add(#1,lsr(r6,#1)) + p0 = cmp.gtu(r6,#4) + } + { + r5:4 = vaslw(r5:4,r7) + if (!p0) r6 = #3 + } + { + loop0(1f,r6) + r7:6 = vlsrw(r5:4,#1) + r1:0 = #0 + } + .falign +1: + { + r5:4 = vlsrw(r5:4,#2) + if (!p0.new) r0 = add(r0,r5) + if (!p0.new) r2 = sub(r2,r4) + p0 = cmp.gtu(r4,r2) + } + { + r7:6 = vlsrw(r7:6,#2) + if (!p0.new) r0 = add(r0,r7) + if (!p0.new) r2 = sub(r2,r6) + p0 = cmp.gtu(r6,r2) + }:endloop0 + { + if (!p0) r0 = add(r0,r7) + } + { + if (p3) r0 = sub(r1,r0) + jumpr r31 + } +SYM_FUNC_END(__hexagon_divsi3) diff --git a/arch/hexagon/lib/memcpy_likely_aligned.S b/arch/hexagon/lib/memcpy_likely_aligned.S new file mode 100644 index 000000000000..6a541fb90a54 --- /dev/null +++ b/arch/hexagon/lib/memcpy_likely_aligned.S @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#include <linux/linkage.h> + +SYM_FUNC_START(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes) + { + p0 = bitsclr(r1,#7) + p0 = bitsclr(r0,#7) + if (p0.new) r5:4 = memd(r1) + if (p0.new) r7:6 = memd(r1+#8) + } + { + if (!p0) jump:nt .Lmemcpy_call + if (p0) r9:8 = memd(r1+#16) + if (p0) r11:10 = memd(r1+#24) + p0 = cmp.gtu(r2,#64) + } + { + if (p0) jump:nt .Lmemcpy_call + if (!p0) memd(r0) = r5:4 + if (!p0) memd(r0+#8) = r7:6 + p0 = cmp.gtu(r2,#32) + } + { + p1 = cmp.gtu(r2,#40) + p2 = cmp.gtu(r2,#48) + if (p0) r13:12 = memd(r1+#32) + if (p1.new) r15:14 = memd(r1+#40) + } + { + memd(r0+#16) = r9:8 + memd(r0+#24) = r11:10 + } + { + if (p0) memd(r0+#32) = r13:12 + if (p1) memd(r0+#40) = r15:14 + if (!p2) jumpr:t r31 + } + { + p0 = cmp.gtu(r2,#56) + r5:4 = memd(r1+#48) + if (p0.new) r7:6 = memd(r1+#56) + } + { + memd(r0+#48) = r5:4 + if (p0) memd(r0+#56) = r7:6 + jumpr r31 + } + +.Lmemcpy_call: + jump memcpy + +SYM_FUNC_END(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes) diff --git a/arch/hexagon/lib/modsi3.S b/arch/hexagon/lib/modsi3.S new file mode 100644 index 000000000000..9ea1c86efac2 --- /dev/null +++ b/arch/hexagon/lib/modsi3.S @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#include <linux/linkage.h> + +SYM_FUNC_START(__hexagon_modsi3) + { + p2 = cmp.ge(r0,#0) + r2 = abs(r0) + r1 = abs(r1) + } + { + r3 = cl0(r2) + r4 = cl0(r1) + p0 = cmp.gtu(r1,r2) + } + { + r3 = sub(r4,r3) + if (p0) jumpr r31 + } + { + p1 = cmp.eq(r3,#0) + loop0(1f,r3) + r0 = r2 + r2 = lsl(r1,r3) + } + .falign +1: + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r2) + r2 = lsr(r2,#1) + if (p1) r1 = #0 + }:endloop0 + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r1) + if (p2) jumpr r31 + } + { + r0 = neg(r0) + jumpr r31 + } +SYM_FUNC_END(__hexagon_modsi3) diff --git a/arch/hexagon/lib/udivsi3.S b/arch/hexagon/lib/udivsi3.S new file mode 100644 index 000000000000..477f27b9311c --- /dev/null +++ b/arch/hexagon/lib/udivsi3.S @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/linkage.h> + +SYM_FUNC_START(__hexagon_udivsi3) + { + r2 = cl0(r0) + r3 = cl0(r1) + r5:4 = combine(#1,#0) + p0 = cmp.gtu(r1,r0) + } + { + r6 = sub(r3,r2) + r4 = r1 + r1:0 = combine(r0,r4) + if (p0) jumpr r31 + } + { + r3:2 = vlslw(r5:4,r6) + loop0(1f,r6) + } + .falign +1: + { + p0 = cmp.gtu(r2,r1) + if (!p0.new) r1 = sub(r1,r2) + if (!p0.new) r0 = add(r0,r3) + r3:2 = vlsrw(r3:2,#1) + }:endloop0 + { + p0 = cmp.gtu(r2,r1) + if (!p0.new) r0 = add(r0,r3) + jumpr r31 + } +SYM_FUNC_END(__hexagon_udivsi3) diff --git a/arch/hexagon/lib/umodsi3.S b/arch/hexagon/lib/umodsi3.S new file mode 100644 index 000000000000..280bf06a55e7 --- /dev/null +++ b/arch/hexagon/lib/umodsi3.S @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#include <linux/linkage.h> + +SYM_FUNC_START(__hexagon_umodsi3) + { + r2 = cl0(r0) + r3 = cl0(r1) + p0 = cmp.gtu(r1,r0) + } + { + r2 = sub(r3,r2) + if (p0) jumpr r31 + } + { + loop0(1f,r2) + p1 = cmp.eq(r2,#0) + r2 = lsl(r1,r2) + } + .falign +1: + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r2) + r2 = lsr(r2,#1) + if (p1) r1 = #0 + }:endloop0 + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r1) + jumpr r31 + } +SYM_FUNC_END(__hexagon_umodsi3) diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 81e2b893b1e7..279252e3e0f7 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -13,6 +13,8 @@ config IA64 select ARCH_MIGHT_HAVE_PC_SERIO select ACPI select ACPI_NUMA if NUMA + select ARCH_ENABLE_MEMORY_HOTPLUG + select ARCH_ENABLE_MEMORY_HOTREMOVE select ARCH_SUPPORTS_ACPI select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI @@ -32,6 +34,7 @@ config IA64 select TTY select HAVE_ARCH_TRACEHOOK select HAVE_VIRT_CPU_ACCOUNTING + select HUGETLB_PAGE_SIZE_VARIABLE if HUGETLB_PAGE select VIRT_TO_BUS select GENERIC_IRQ_PROBE select GENERIC_PENDING_IRQ if SMP @@ -82,11 +85,6 @@ config STACKTRACE_SUPPORT config GENERIC_LOCKBREAK def_bool n -config HUGETLB_PAGE_SIZE_VARIABLE - bool - depends on HUGETLB_PAGE - default y - config GENERIC_CALIBRATE_DELAY bool default y @@ -250,12 +248,6 @@ config HOTPLUG_CPU can be controlled through /sys/devices/system/cpu/cpu#. Say N if you want to disable CPU hotplug. 
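The __hexagon_udivsi3 and __hexagon_umodsi3 routines earlier in this hunk share one scheme: compare leading-zero counts to line the divisor's top bit up with the dividend's, then step the shifted divisor down, subtracting whenever it still fits. A compact C model of the remainder variant (an illustration only; __builtin_clz is assumed, which gcc and clang both provide):

static unsigned int umodsi3_model(unsigned int n, unsigned int d)
{
        int shift;

        /* Divisor larger than the dividend: the dividend is already the
         * remainder (the assembly takes the same early exit).  d == 0 is
         * guarded here only to keep the sketch well defined. */
        if (d == 0 || d > n)
                return n;

        shift = __builtin_clz(d) - __builtin_clz(n);
        while (shift >= 0) {
                if ((d << shift) <= n)
                        n -= d << shift;
                shift--;
        }
        return n;
}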
-config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - config SCHED_SMT bool "SMT scheduler support" depends on SMP diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h index 3d666a11a2de..6d93b923b379 100644 --- a/arch/ia64/include/asm/io.h +++ b/arch/ia64/include/asm/io.h @@ -277,7 +277,6 @@ extern void memset_io(volatile void __iomem *s, int c, long n); #define memcpy_fromio memcpy_fromio #define memcpy_toio memcpy_toio #define memset_io memset_io -#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr #define xlate_dev_mem_ptr xlate_dev_mem_ptr #include <asm-generic/io.h> #undef PCI_IOBASE diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 179243c3dfc7..e19d2dcc0ced 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -272,22 +272,4 @@ xlate_dev_mem_ptr(phys_addr_t p) return ptr; } -/* - * Convert a virtual cached kernel memory pointer to an uncached pointer - */ -static __inline__ void * -xlate_dev_kmem_ptr(void *p) -{ - struct page *page; - void *ptr; - - page = virt_to_page((unsigned long)p); - if (PageUncached(page)) - ptr = (void *)__pa(p) + __IA64_UNCACHED_OFFSET; - else - ptr = p; - - return ptr; -} - #endif /* _ASM_IA64_UACCESS_H */ diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index b331f94d20ac..f993cb36c062 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -25,7 +25,8 @@ unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT; EXPORT_SYMBOL(hpage_shift); pte_t * -huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) +huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { unsigned long taddr = htlbpage_to_page(addr); pgd_t *pgd; diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c index 1068670cb741..7e44d0e9d0f8 100644 --- a/arch/m68k/atari/time.c +++ b/arch/m68k/atari/time.c @@ -317,10 +317,3 @@ int atari_tt_hwclk( int op, struct rtc_time *t ) return( 0 ); } - -/* - * Local variables: - * c-indent-level: 4 - * tab-width: 8 - * End: - */ diff --git a/arch/m68k/coldfire/intc-simr.c b/arch/m68k/coldfire/intc-simr.c index 15c4b7a6e38f..f7c2c41b3156 100644 --- a/arch/m68k/coldfire/intc-simr.c +++ b/arch/m68k/coldfire/intc-simr.c @@ -68,9 +68,9 @@ static void intc_irq_mask(struct irq_data *d) { unsigned int irq = d->irq - MCFINT_VECBASE; - if (MCFINTC2_SIMR && (irq > 128)) + if (MCFINTC2_SIMR && (irq > 127)) __raw_writeb(irq - 128, MCFINTC2_SIMR); - else if (MCFINTC1_SIMR && (irq > 64)) + else if (MCFINTC1_SIMR && (irq > 63)) __raw_writeb(irq - 64, MCFINTC1_SIMR); else __raw_writeb(irq, MCFINTC0_SIMR); @@ -80,9 +80,9 @@ static void intc_irq_unmask(struct irq_data *d) { unsigned int irq = d->irq - MCFINT_VECBASE; - if (MCFINTC2_CIMR && (irq > 128)) + if (MCFINTC2_CIMR && (irq > 127)) __raw_writeb(irq - 128, MCFINTC2_CIMR); - else if (MCFINTC1_CIMR && (irq > 64)) + else if (MCFINTC1_CIMR && (irq > 63)) __raw_writeb(irq - 64, MCFINTC1_CIMR); else __raw_writeb(irq, MCFINTC0_CIMR); @@ -115,9 +115,9 @@ static unsigned int intc_irq_startup(struct irq_data *d) } irq -= MCFINT_VECBASE; - if (MCFINTC2_ICR0 && (irq > 128)) + if (MCFINTC2_ICR0 && (irq > 127)) __raw_writeb(5, MCFINTC2_ICR0 + irq - 128); - else if (MCFINTC1_ICR0 && (irq > 64)) + else if (MCFINTC1_ICR0 && (irq > 63)) __raw_writeb(5, MCFINTC1_ICR0 + irq - 64); else __raw_writeb(5, MCFINTC0_ICR0 + irq); diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig index 
3a84f24d41c8..6d9ed2198170 100644 --- a/arch/m68k/configs/amcore_defconfig +++ b/arch/m68k/configs/amcore_defconfig @@ -60,7 +60,6 @@ CONFIG_DM9000=y # CONFIG_VT is not set # CONFIG_UNIX98_PTYS is not set # CONFIG_DEVMEM is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_MCF=y CONFIG_SERIAL_MCF_BAUDRATE=115200 CONFIG_SERIAL_MCF_CONSOLE=y diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h index 10133a968c8e..7b414099e5fc 100644 --- a/arch/m68k/include/asm/bitops.h +++ b/arch/m68k/include/asm/bitops.h @@ -440,8 +440,6 @@ static inline unsigned long ffz(unsigned long word) #endif -#include <asm-generic/bitops/find.h> - #ifdef __KERNEL__ #if defined(CONFIG_CPU_HAS_NO_BITFIELDS) @@ -525,10 +523,12 @@ static inline int __fls(int x) #define __clear_bit_unlock clear_bit_unlock #include <asm-generic/bitops/ext2-atomic.h> -#include <asm-generic/bitops/le.h> #include <asm-generic/bitops/fls64.h> #include <asm-generic/bitops/sched.h> #include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/le.h> #endif /* __KERNEL__ */ +#include <asm-generic/bitops/find.h> + #endif /* _M68K_BITOPS_H */ diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h index 819f611dccf2..d41fa488453b 100644 --- a/arch/m68k/include/asm/io_mm.h +++ b/arch/m68k/include/asm/io_mm.h @@ -397,11 +397,6 @@ static inline void isa_delay(void) */ #define xlate_dev_mem_ptr(p) __va(p) -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - #define readb_relaxed(addr) readb(addr) #define readw_relaxed(addr) readw(addr) #define readl_relaxed(addr) readl(addr) diff --git a/arch/microblaze/include/asm/ftrace.h b/arch/microblaze/include/asm/ftrace.h index 5db7f4489f05..6a92bed37794 100644 --- a/arch/microblaze/include/asm/ftrace.h +++ b/arch/microblaze/include/asm/ftrace.h @@ -13,7 +13,7 @@ extern void ftrace_call_graph(void); #endif #ifdef CONFIG_DYNAMIC_FTRACE -/* reloction of mcount call site is the same as the address */ +/* relocation of mcount call site is the same as the address */ static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 49a3c9cd1cb2..ed51970c08e7 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -19,6 +19,7 @@ config MIPS select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS + select ARCH_SUPPORTS_HUGETLBFS if CPU_SUPPORTS_HUGEPAGES select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WANT_LD_ORPHAN_WARN @@ -1287,11 +1288,6 @@ config SYS_SUPPORTS_BIG_ENDIAN config SYS_SUPPORTS_LITTLE_ENDIAN bool -config SYS_SUPPORTS_HUGETLBFS - bool - depends on CPU_SUPPORTS_HUGEPAGES - default y - config MIPS_HUGE_TLB_SUPPORT def_bool HUGETLB_PAGE || TRANSPARENT_HUGEPAGE diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 2c138450ad3b..6f5c86d2bab4 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -564,11 +564,6 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size); */ #define xlate_dev_mem_ptr(p) __va(p) -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - void __ioread64_copy(void *to, const void __iomem *from, size_t count); #endif /* _ASM_IO_H */ diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c index b9f76f433617..7eaff5b07873 100644 --- a/arch/mips/mm/hugetlbpage.c +++ b/arch/mips/mm/hugetlbpage.c @@ -21,8 +21,8 @@ 
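The intc-simr hunks above fix an off-by-one in routing an interrupt to one of the three ColdFire mask banks: with the old tests, vector 64 (the first line of the second bank) and vector 128 (the first line of the third bank) fell through to the previous bank with an out-of-range offset. A standalone demonstration of just the boundary logic; this sketch ignores the MCFINTC*_SIMR presence checks the real code also makes:

#include <stdio.h>

static int bank_old(unsigned int irq) { return irq > 128 ? 2 : irq > 64 ? 1 : 0; }
static int bank_new(unsigned int irq) { return irq > 127 ? 2 : irq > 63 ? 1 : 0; }

int main(void)
{
        static const unsigned int probes[] = { 63, 64, 127, 128 };
        unsigned int i;

        for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
                printf("irq %3u: old bank %d, new bank %d\n",
                       probes[i], bank_old(probes[i]), bank_new(probes[i]));
        return 0;
}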
#include <asm/tlb.h> #include <asm/tlbflush.h> -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, - unsigned long sz) +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { pgd_t *pgd; p4d_t *p4d; diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 010ba5f1d7dd..d4cbf069dc22 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -260,7 +260,6 @@ do { \ extern unsigned long __arch_clear_user(void __user * addr, unsigned long n); extern long strncpy_from_user(char *dest, const char __user * src, long count); -extern __must_check long strlen_user(const char __user * str); extern __must_check long strnlen_user(const char __user * str, long n); extern unsigned long __arch_copy_from_user(void *to, const void __user * from, unsigned long n); diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c index 414f8a780cc3..0e23e3a8df6b 100644 --- a/arch/nds32/kernel/ftrace.c +++ b/arch/nds32/kernel/ftrace.c @@ -236,7 +236,7 @@ void __naked return_to_handler(void) "bal ftrace_return_to_handler\n\t" "move $lp, $r0 \n\t" - /* restore state nedded by the ABI */ + /* restore state needed by the ABI */ "lmw.bim $r0,[$sp],$r1,#0x0 \n\t"); } diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index a741abbed6fb..ba9340e96fd4 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -83,7 +83,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n); extern long strncpy_from_user(char *__to, const char __user *__from, long __len); -extern __must_check long strlen_user(const char __user *str); extern __must_check long strnlen_user(const char __user *s, long n); /* Optimized macros */ diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig index 75f2da324d0e..6e1e004047c7 100644 --- a/arch/openrisc/configs/or1ksim_defconfig +++ b/arch/openrisc/configs/or1ksim_defconfig @@ -43,7 +43,6 @@ CONFIG_MICREL_PHY=y # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index afc3b8d03572..bde9907bc5b2 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -12,6 +12,7 @@ config PARISC select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_NO_SG_CHAIN + select ARCH_SUPPORTS_HUGETLBFS if PA20 select ARCH_SUPPORTS_MEMORY_FAILURE select DMA_OPS select RTC_CLASS @@ -138,10 +139,6 @@ config PGTABLE_LEVELS default 3 if 64BIT && PARISC_PAGE_SIZE_4KB default 2 -config SYS_SUPPORTS_HUGETLBFS - def_bool y if PA20 - - menu "Processor type and features" choice diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index 4406475a2304..e6e7f74c8ac9 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 generated-y += syscall_table_32.h generated-y += syscall_table_64.h -generated-y += syscall_table_c32.h generic-y += kvm_para.h generic-y += mcs_spinlock.h generic-y += user.h diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h index 8a11b8cf4719..0b5259102319 100644 --- a/arch/parisc/include/asm/io.h +++ b/arch/parisc/include/asm/io.h @@ -316,11 +316,6 @@ extern void iowrite64be(u64 val, void __iomem *addr); */ #define xlate_dev_mem_ptr(p) 
__va(p) -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - extern int devmem_is_allowed(unsigned long pfn); #endif diff --git a/arch/parisc/include/asm/pdc_chassis.h b/arch/parisc/include/asm/pdc_chassis.h index ae3e108d22ad..d6d82f53d3d0 100644 --- a/arch/parisc/include/asm/pdc_chassis.h +++ b/arch/parisc/include/asm/pdc_chassis.h @@ -365,4 +365,3 @@ void parisc_pdc_chassis_init(void); PDC_CHASSIS_EOM_SET ) #endif /* _PARISC_PDC_CHASSIS_H */ -/* vim: set ts=8 */ diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index e320bae501d3..3fb86ee507dd 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -268,7 +268,7 @@ static int __init parisc_init_resources(void) result = request_resource(&iomem_resource, &local_broadcast); if (result < 0) { printk(KERN_ERR - "%s: failed to claim %saddress space!\n", + "%s: failed to claim %s address space!\n", __FILE__, local_broadcast.name); return result; } diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 322503780db6..3f24a0af1e04 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -919,24 +919,24 @@ ENTRY(lws_table) END(lws_table) /* End of lws table */ +#ifdef CONFIG_64BIT +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) +#else +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#endif #define __SYSCALL(nr, entry) ASM_ULONG_INSN entry .align 8 ENTRY(sys_call_table) .export sys_call_table,data -#ifdef CONFIG_64BIT -#include <asm/syscall_table_c32.h> /* Compat syscalls */ -#else -#include <asm/syscall_table_32.h> /* 32-bit native syscalls */ -#endif +#include <asm/syscall_table_32.h> /* 32-bit syscalls */ END(sys_call_table) #ifdef CONFIG_64BIT .align 8 ENTRY(sys_call_table64) -#include <asm/syscall_table_64.h> /* 64-bit native syscalls */ +#include <asm/syscall_table_64.h> /* 64-bit syscalls */ END(sys_call_table64) #endif -#undef __SYSCALL /* All light-weight-syscall atomic operations @@ -961,5 +961,3 @@ END(lws_lock_start) .previous .end - - diff --git a/arch/parisc/kernel/syscalls/Makefile b/arch/parisc/kernel/syscalls/Makefile index 283f64407b07..0f2ea5bcb0d7 100644 --- a/arch/parisc/kernel/syscalls/Makefile +++ b/arch/parisc/kernel/syscalls/Makefile @@ -6,46 +6,34 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') syscall := $(src)/syscall.tbl -syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +syshdr := $(srctree)/scripts/syscallhdr.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abis_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '$(syshdr_offset_$(basetarget))' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis $(abis) $< $@ quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))' \ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@ -syshdr_abis_unistd_32 := common,32 +$(uapi)/unistd_32.h: abis := common,32 $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -syshdr_abis_unistd_64 := common,64 +$(uapi)/unistd_64.h: abis := common,64 $(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -systbl_abis_syscall_table_32 := common,32 +$(kapi)/syscall_table_32.h: abis := common,32 
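With syscall_table_c32.h gone, the 64-bit parisc kernel builds its 32-bit entry table from the shared syscall_table_32.h: rows that have a compat entry come out of the generic scripts/syscalltbl.sh as __SYSCALL_WITH_COMPAT(nr, native, compat), and the #define block added to syscall.S above picks the compat entry under CONFIG_64BIT and the native one otherwise. A self-contained toy in plain C showing the same dispatch pattern; sys_example and compat_sys_example are hypothetical names:

#include <stdio.h>

static long sys_example(void)        { return 1; }      /* native handler */
static long compat_sys_example(void) { return 2; }      /* compat handler */

#ifdef CONFIG_64BIT
#define __SYSCALL_WITH_COMPAT(nr, native, compat)       [nr] = compat,
#else
#define __SYSCALL_WITH_COMPAT(nr, native, compat)       [nr] = native,
#endif
#define __SYSCALL(nr, entry)                            [nr] = entry,

static long (* const table[])(void) = {
        __SYSCALL(0, sys_example)
        __SYSCALL_WITH_COMPAT(1, sys_example, compat_sys_example)
};

int main(void)
{
        printf("entry 1 resolves to the %s path\n",
               table[1]() == 2 ? "compat" : "native");
        return 0;
}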
$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abis_syscall_table_64 := common,64 +$(kapi)/syscall_table_64.h: abis := common,64 $(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abis_syscall_table_c32 := common,32 -systbl_abi_syscall_table_c32 := c32 -$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE - $(call if_changed,systbl) - uapisyshdr-y += unistd_32.h unistd_64.h kapisyshdr-y += syscall_table_32.h \ - syscall_table_64.h \ - syscall_table_c32.h + syscall_table_64.h uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) diff --git a/arch/parisc/kernel/syscalls/syscallhdr.sh b/arch/parisc/kernel/syscalls/syscallhdr.sh deleted file mode 100644 index 730db288fe54..000000000000 --- a/arch/parisc/kernel/syscalls/syscallhdr.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_UAPI_ASM_PARISC_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - printf "#ifndef %s\n" "${fileguard}" - printf "#define %s\n" "${fileguard}" - printf "\n" - - nxt=0 - while read nr abi name entry compat ; do - if [ -z "$offset" ]; then - printf "#define __NR_%s%s\t%s\n" \ - "${prefix}" "${name}" "${nr}" - else - printf "#define __NR_%s%s\t(%s + %s)\n" \ - "${prefix}" "${name}" "${offset}" "${nr}" - fi - nxt=$((nr+1)) - done - - printf "\n" - printf "#ifdef __KERNEL__\n" - printf "#define __NR_syscalls\t%s\n" "${nxt}" - printf "#endif\n" - printf "\n" - printf "#endif /* %s */\n" "${fileguard}" -) > "$out" diff --git a/arch/parisc/kernel/syscalls/syscalltbl.sh b/arch/parisc/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index f7393a7b18aa..000000000000 --- a/arch/parisc/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry compat ; do - if [ "$my_abi" = "c32" ] && [ ! -z "$compat" ]; then - emit $((nxt+offset)) $((nr+offset)) $compat - else - emit $((nxt+offset)) $((nr+offset)) $entry - fi - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index 43652de5f139..d1d3990b83f6 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -44,7 +44,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index ab17a56c3d10..088dd2afcfe4 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -118,6 +118,8 @@ config PPC # Please keep this list sorted alphabetically. 
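One change repeats across this merge (ia64, MIPS and parisc above, powerpc a little further down): huge_pte_alloc() gains a struct vm_area_struct * argument so an implementation can look at per-VMA state, for instance when deciding whether a huge PMD may be shared. A hedged sketch of the updated prototype and a hypothetical caller; the real callers live in mm/hugetlb.c and are not part of this diff:

#include <linux/mm.h>
#include <linux/hugetlb.h>

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz);

static pte_t *sketch_map_huge(struct vm_area_struct *vma, unsigned long addr)
{
        unsigned long sz = huge_page_size(hstate_vma(vma));

        /* the VMA is now threaded through to the arch implementation */
        return huge_pte_alloc(vma->vm_mm, vma, addr, sz);
}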
# select ARCH_32BIT_OFF_T if PPC32 + select ARCH_ENABLE_MEMORY_HOTPLUG + select ARCH_ENABLE_MEMORY_HOTREMOVE select ARCH_HAS_COPY_MC if PPC64 select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE @@ -184,8 +186,8 @@ config PPC select GENERIC_TIME_VSYSCALL select GENERIC_VDSO_TIME_NS select HAVE_ARCH_AUDITSYSCALL - select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP + select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14 @@ -244,6 +246,7 @@ config PPC select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13) select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING + select HUGETLB_PAGE_SIZE_VARIABLE if PPC_BOOK3S_64 && HUGETLB_PAGE select IOMMU_HELPER if PPC64 select IRQ_DOMAIN select IRQ_FORCED_THREADING @@ -421,11 +424,6 @@ config HIGHMEM source "kernel/Kconfig.hz" -config HUGETLB_PAGE_SIZE_VARIABLE - bool - depends on HUGETLB_PAGE && PPC_BOOK3S_64 - default y - config MATH_EMULATION bool "Math emulation" depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE @@ -521,12 +519,6 @@ config ARCH_CPU_PROBE_RELEASE def_bool y depends on HOTPLUG_CPU -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - config PPC64_SUPPORTS_MEMORY_FAILURE bool "Add support for memory hwpoison" depends on PPC_BOOK3S_64 @@ -706,9 +698,6 @@ config ARCH_SPARSEMEM_DEFAULT def_bool y depends on PPC_BOOK3S_64 -config SYS_SUPPORTS_HUGETLBFS - bool - config ILLEGAL_POINTER_VALUE hex # This is roughly half way between the top of user space and the bottom diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index bc76970b6ee5..debe8c4f7062 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -12,7 +12,7 @@ #ifdef __ASSEMBLY__ -/* Based off of objdump optput from glibc */ +/* Based off of objdump output from glibc */ #define MCOUNT_SAVE_FRAME \ stwu r1,-48(r1); \ @@ -52,7 +52,7 @@ extern void _mcount(void); static inline unsigned long ftrace_call_adjust(unsigned long addr) { - /* reloction of mcount call site is the same as the address */ + /* relocation of mcount call site is the same as the address */ return addr; } diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 273edd208ec5..f130783c8301 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -663,11 +663,6 @@ static inline void name at \ #define xlate_dev_mem_ptr(p) __va(p) /* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - -/* * We don't do relaxed operations yet, at least not with this semantic */ #define readb_relaxed(addr) readb(addr) diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index d142b76d507d..9a75ba078e1b 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -106,7 +106,8 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, * At this point we do the placement change only for BOOK3S 64. This would * possibly work on other subarchs. 
*/ -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { pgd_t *pg; p4d_t *p4; diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index e4b05667686e..f998e655b570 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -40,8 +40,8 @@ config PPC_85xx config PPC_8xx bool "Freescale 8xx" + select ARCH_SUPPORTS_HUGETLBFS select FSL_SOC - select SYS_SUPPORTS_HUGETLBFS select PPC_HAVE_KUEP select PPC_HAVE_KUAP select HAVE_ARCH_VMAP_STACK @@ -95,9 +95,11 @@ config PPC_BOOK3S_64 bool "Server processors" select PPC_FPU select PPC_HAVE_PMU_SUPPORT - select SYS_SUPPORTS_HUGETLBFS select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION + select ARCH_ENABLE_PMD_SPLIT_PTLOCK select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE + select ARCH_SUPPORTS_HUGETLBFS select ARCH_SUPPORTS_NUMA_BALANCING select IRQ_WORK select PPC_MM_SLICES @@ -280,9 +282,9 @@ config FSL_BOOKE # this is for common code between PPC32 & PPC64 FSL BOOKE config PPC_FSL_BOOK3E bool + select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 select FSL_EMB_PERFMON select PPC_SMP_MUXED_IPI - select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 select PPC_DOORBELL default y if FSL_BOOKE @@ -358,10 +360,6 @@ config SPE If in doubt, say Y here. -config ARCH_ENABLE_SPLIT_PMD_PTLOCK - def_bool y - depends on PPC_BOOK3S_64 - config PPC_RADIX_MMU bool "Radix MMU Support" depends on PPC_BOOK3S_64 @@ -421,10 +419,6 @@ config PPC_PKEY depends on PPC_BOOK3S_64 depends on PPC_MEM_KEYS || PPC_KUAP || PPC_KUEP -config ARCH_ENABLE_HUGEPAGE_MIGRATION - def_bool y - depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION - config PPC_MMU_NOHASH def_bool y diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c index 7b739cc7a8a9..1d829e257996 100644 --- a/arch/powerpc/platforms/pseries/svm.c +++ b/arch/powerpc/platforms/pseries/svm.c @@ -55,9 +55,9 @@ void __init svm_swiotlb_init(void) if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false)) return; - if (io_tlb_start) - memblock_free_early(io_tlb_start, - PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); + + memblock_free_early(__pa(vstart), + PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); panic("SVM: Cannot allocate SWIOTLB buffer"); } diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 4515a10c5d22..a8ad8eb76120 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -20,6 +20,7 @@ config RISCV select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEBUG_VIRTUAL if MMU select ARCH_HAS_DEBUG_WX + select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_KCOV @@ -27,18 +28,23 @@ config RISCV select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_DIRECT_MAP select ARCH_HAS_SET_MEMORY - select ARCH_HAS_STRICT_KERNEL_RWX if MMU + select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL + select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT + select ARCH_SUPPORTS_HUGETLBFS if MMU select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_HUGE_PMD_SHARE if 64BIT + select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU select CLONE_BACKWARDS select 
CLINT_TIMER if !MMU select COMMON_CLK select EDAC_SUPPORT select GENERIC_ARCH_TOPOLOGY if SMP select GENERIC_ATOMIC64 if !64BIT + select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_EARLY_IOREMAP select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO select GENERIC_IOREMAP @@ -165,10 +171,6 @@ config ARCH_WANT_GENERAL_HUGETLB config ARCH_SUPPORTS_UPROBES def_bool y -config SYS_SUPPORTS_HUGETLBFS - depends on MMU - def_bool y - config STACKTRACE_SUPPORT def_bool y @@ -204,6 +206,7 @@ config LOCKDEP_SUPPORT def_bool y source "arch/riscv/Kconfig.socs" +source "arch/riscv/Kconfig.erratas" menu "Platform type" @@ -227,7 +230,7 @@ config ARCH_RV64I bool "RV64I" select 64BIT select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000 - select HAVE_DYNAMIC_FTRACE if MMU + select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8) select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_GRAPH_TRACER @@ -386,6 +389,31 @@ config RISCV_SBI_V01 help This config allows kernel to use SBI v0.1 APIs. This will be deprecated in future once legacy M-mode software are no longer in use. + +config KEXEC + bool "Kexec system call" + select KEXEC_CORE + select HOTPLUG_CPU if SMP + depends on MMU + help + kexec is a system call that implements the ability to shutdown your + current kernel, and to start another kernel. It is like a reboot + but it is independent of the system firmware. And like a reboot + you can start any kernel with it, not just Linux. + + The name comes from the similarity to the exec system call. + +config CRASH_DUMP + bool "Build kdump crash kernel" + help + Generate crash dump after being started by kexec. This should + be normally only set in special crash dump kernels which are + loaded in the main kernel with kexec-tools into a specially + reserved region and then later executed after a crash by + kdump/kexec. + + For more details see Documentation/admin-guide/kdump/kdump.rst + endmenu menu "Boot options" @@ -438,7 +466,7 @@ config EFI_STUB config EFI bool "UEFI runtime support" - depends on OF + depends on OF && !XIP_KERNEL select LIBFDT select UCS2_STRING select EFI_PARAMS_FROM_FDT @@ -462,11 +490,63 @@ config STACKPROTECTOR_PER_TASK def_bool y depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS +config PHYS_RAM_BASE_FIXED + bool "Explicitly specified physical RAM address" + default n + +config PHYS_RAM_BASE + hex "Platform Physical RAM address" + depends on PHYS_RAM_BASE_FIXED + default "0x80000000" + help + This is the physical address of RAM in the system. It has to be + explicitly specified to run early relocations of read-write data + from flash to RAM. + +config XIP_KERNEL + bool "Kernel Execute-In-Place from ROM" + depends on MMU && SPARSEMEM + # This prevents XIP from being enabled by all{yes,mod}config, which + # fail to build since XIP doesn't support large kernels. + depends on !COMPILE_TEST + select PHYS_RAM_BASE_FIXED + help + Execute-In-Place allows the kernel to run from non-volatile storage + directly addressable by the CPU, such as NOR flash. This saves RAM + space since the text section of the kernel is not loaded from flash + to RAM. Read-write sections, such as the data section and stack, + are still copied to RAM. The XIP kernel is not compressed since + it has to run directly from flash, so it will take more space to + store it. The flash address used to link the kernel object files, + and for storing it, is configuration dependent. 
Therefore, if you + say Y here, you must know the proper physical address where to + store the kernel image depending on your own flash memory usage. + + Also note that the make target becomes "make xipImage" rather than + "make zImage" or "make Image". The final kernel binary to put in + ROM memory will be arch/riscv/boot/xipImage. + + SPARSEMEM is required because the kernel text and rodata that are + flash resident are not backed by memmap, then any attempt to get + a struct page on those regions will trigger a fault. + + If unsure, say N. + +config XIP_PHYS_ADDR + hex "XIP Kernel Physical Location" + depends on XIP_KERNEL + default "0x21000000" + help + This is the physical address in your flash memory the kernel will + be linked for and stored to. This address is dependent on your + own flash usage. + endmenu config BUILTIN_DTB - def_bool n + bool depends on OF + default y if XIP_KERNEL menu "Power management options" diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas new file mode 100644 index 000000000000..d5d03ae8d685 --- /dev/null +++ b/arch/riscv/Kconfig.erratas @@ -0,0 +1,44 @@ +menu "CPU errata selection" + +config RISCV_ERRATA_ALTERNATIVE + bool "RISC-V alternative scheme" + default y + help + This Kconfig allows the kernel to automatically patch the + errata required by the execution platform at run time. The + code patching is performed once in the boot stages. It means + that the overhead from this mechanism is just taken once. + +config ERRATA_SIFIVE + bool "SiFive errata" + depends on RISCV_ERRATA_ALTERNATIVE + help + All SiFive errata Kconfig depend on this Kconfig. Disabling + this Kconfig will disable all SiFive errata. Please say "Y" + here if your platform uses SiFive CPU cores. + + Otherwise, please say "N" here to avoid unnecessary overhead. + +config ERRATA_SIFIVE_CIP_453 + bool "Apply SiFive errata CIP-453" + depends on ERRATA_SIFIVE + default y + help + This will apply the SiFive CIP-453 errata to add sign extension + to the $badaddr when exception type is instruction page fault + and instruction access fault. + + If you don't know what to do here, say "Y". + +config ERRATA_SIFIVE_CIP_1200 + bool "Apply SiFive errata CIP-1200" + depends on ERRATA_SIFIVE + default y + help + This will apply the SiFive CIP-1200 errata to repalce all + "sfence.vma addr" with "sfence.vma" to ensure that the addr + has been flushed from TLB. + + If you don't know what to do here, say "Y". + +endmenu diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs index e1b2690b6e45..ed963761fbd2 100644 --- a/arch/riscv/Kconfig.socs +++ b/arch/riscv/Kconfig.socs @@ -1,5 +1,12 @@ menu "SoC selection" +config SOC_MICROCHIP_POLARFIRE + bool "Microchip PolarFire SoCs" + select MCHP_CLK_MPFS + select SIFIVE_PLIC + help + This enables support for Microchip PolarFire SoC platforms. + config SOC_SIFIVE bool "SiFive SoCs" select SERIAL_SIFIVE if TTY @@ -7,6 +14,7 @@ config SOC_SIFIVE select CLK_SIFIVE select CLK_SIFIVE_PRCI select SIFIVE_PLIC + select ERRATA_SIFIVE help This enables support for SiFive SoC platform hardware. 
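The errata options above describe the mechanism end to end: call sites are wrapped in the ALTERNATIVE() macro added later in this diff (alternative-macros.h), the entries land in a .alternative section, and they are patched once during boot on cores whose vendor, architecture and implementation IDs match. A hedged sketch of what a CIP-1200 call site would look like; the ERRATA_SIFIVE_CIP_1200 constant is assumed to come from asm/errata_list.h and the exact in-tree call site is not part of this hunk:

#include <asm/alternative.h>
#include <asm/vendorid_list.h>
#include <asm/errata_list.h>

static inline void sketch_flush_tlb_page(unsigned long addr)
{
        /* patched from a per-address fence to a full sfence.vma on
         * affected SiFive cores */
        asm volatile(ALTERNATIVE("sfence.vma %0", "sfence.vma",
                                 SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_1200,
                                 CONFIG_ERRATA_SIFIVE_CIP_1200)
                     : : "r" (addr) : "memory");
}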
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 1368d943f1f3..3eb9590a0775 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -82,11 +82,16 @@ CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS) # Default target when executing plain make boot := arch/riscv/boot +ifeq ($(CONFIG_XIP_KERNEL),y) +KBUILD_IMAGE := $(boot)/xipImage +else KBUILD_IMAGE := $(boot)/Image.gz +endif head-y := arch/riscv/kernel/head.o core-y += arch/riscv/ +core-$(CONFIG_RISCV_ERRATA_ALTERNATIVE) += arch/riscv/errata/ libs-y += arch/riscv/lib/ libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a @@ -95,12 +100,14 @@ PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@ +ifneq ($(CONFIG_XIP_KERNEL),y) ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy) KBUILD_IMAGE := $(boot)/loader.bin else KBUILD_IMAGE := $(boot)/Image.gz endif -BOOT_TARGETS := Image Image.gz loader loader.bin +endif +BOOT_TARGETS := Image Image.gz loader loader.bin xipImage all: $(notdir $(KBUILD_IMAGE)) diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile index 03404c84f971..6bf299f70c27 100644 --- a/arch/riscv/boot/Makefile +++ b/arch/riscv/boot/Makefile @@ -17,8 +17,21 @@ KCOV_INSTRUMENT := n OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S +OBJCOPYFLAGS_xipImage :=-O binary -R .note -R .note.gnu.build-id -R .comment -S targets := Image Image.* loader loader.o loader.lds loader.bin +targets := Image Image.* loader loader.o loader.lds loader.bin xipImage + +ifeq ($(CONFIG_XIP_KERNEL),y) + +quiet_cmd_mkxip = $(quiet_cmd_objcopy) +cmd_mkxip = $(cmd_objcopy) + +$(obj)/xipImage: vmlinux FORCE + $(call if_changed,mkxip) + @$(kecho) ' Physical Address of xipImage: $(CONFIG_XIP_PHYS_ADDR)' + +endif $(obj)/Image: vmlinux FORCE $(call if_changed,objcopy) diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile index 7ffd502e3e7b..fe996b88319e 100644 --- a/arch/riscv/boot/dts/Makefile +++ b/arch/riscv/boot/dts/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 subdir-y += sifive subdir-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += canaan +subdir-y += microchip obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y)) diff --git a/arch/riscv/boot/dts/microchip/Makefile b/arch/riscv/boot/dts/microchip/Makefile new file mode 100644 index 000000000000..622b12771fd3 --- /dev/null +++ b/arch/riscv/boot/dts/microchip/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += microchip-mpfs-icicle-kit.dtb diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts new file mode 100644 index 000000000000..ec79944065c9 --- /dev/null +++ b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Copyright (c) 2020 Microchip Technology Inc */ + +/dts-v1/; + +#include "microchip-mpfs.dtsi" + +/* Clock frequency (in Hz) of the rtcclk */ +#define RTCCLK_FREQ 1000000 + +/ { + #address-cells = <2>; + #size-cells = <2>; + model = "Microchip PolarFire-SoC Icicle Kit"; + compatible = "microchip,mpfs-icicle-kit"; + + chosen { + stdout-path = &serial0; + }; + + cpus { + timebase-frequency = <RTCCLK_FREQ>; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x0 0x80000000 0x0 0x40000000>; + clocks = <&clkcfg 26>; + }; + + soc { + }; +}; + +&serial0 { + status = "okay"; +}; + +&serial1 { + status = "okay"; +}; + +&serial2 { + status = "okay"; +}; + +&serial3 { + 
status = "okay"; +}; + +&sdcard { + status = "okay"; +}; + +&emac0 { + phy-mode = "sgmii"; + phy-handle = <&phy0>; + phy0: ethernet-phy@8 { + reg = <8>; + ti,fifo-depth = <0x01>; + }; +}; + +&emac1 { + status = "okay"; + phy-mode = "sgmii"; + phy-handle = <&phy1>; + phy1: ethernet-phy@9 { + reg = <9>; + ti,fifo-depth = <0x01>; + }; +}; diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi new file mode 100644 index 000000000000..b9819570a7d1 --- /dev/null +++ b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Copyright (c) 2020 Microchip Technology Inc */ + +/dts-v1/; + +/ { + #address-cells = <2>; + #size-cells = <2>; + model = "Microchip MPFS Icicle Kit"; + compatible = "microchip,mpfs-icicle-kit"; + + chosen { + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu@0 { + clock-frequency = <0>; + compatible = "sifive,e51", "sifive,rocket0", "riscv"; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <128>; + i-cache-size = <16384>; + reg = <0>; + riscv,isa = "rv64imac"; + status = "disabled"; + + cpu0_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + cpu@1 { + clock-frequency = <0>; + compatible = "sifive,u54-mc", "sifive,rocket0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <32>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <64>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv39"; + reg = <1>; + riscv,isa = "rv64imafdc"; + tlb-split; + status = "okay"; + + cpu1_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + cpu@2 { + clock-frequency = <0>; + compatible = "sifive,u54-mc", "sifive,rocket0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <32>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <64>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv39"; + reg = <2>; + riscv,isa = "rv64imafdc"; + tlb-split; + status = "okay"; + + cpu2_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + cpu@3 { + clock-frequency = <0>; + compatible = "sifive,u54-mc", "sifive,rocket0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <32>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <64>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv39"; + reg = <3>; + riscv,isa = "rv64imafdc"; + tlb-split; + status = "okay"; + + cpu3_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + cpu@4 { + clock-frequency = <0>; + compatible = "sifive,u54-mc", "sifive,rocket0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <32>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <64>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv39"; + reg = <4>; + riscv,isa = "rv64imafdc"; + tlb-split; + status = "okay"; + cpu4_intc: 
interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + }; + + soc { + #address-cells = <2>; + #size-cells = <2>; + compatible = "simple-bus"; + ranges; + + cache-controller@2010000 { + compatible = "sifive,fu540-c000-ccache", "cache"; + cache-block-size = <64>; + cache-level = <2>; + cache-sets = <1024>; + cache-size = <2097152>; + cache-unified; + interrupt-parent = <&plic>; + interrupts = <1 2 3>; + reg = <0x0 0x2010000 0x0 0x1000>; + }; + + clint@2000000 { + compatible = "sifive,clint0"; + reg = <0x0 0x2000000 0x0 0xC000>; + interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7 + &cpu1_intc 3 &cpu1_intc 7 + &cpu2_intc 3 &cpu2_intc 7 + &cpu3_intc 3 &cpu3_intc 7 + &cpu4_intc 3 &cpu4_intc 7>; + }; + + plic: interrupt-controller@c000000 { + #interrupt-cells = <1>; + compatible = "sifive,plic-1.0.0"; + reg = <0x0 0xc000000 0x0 0x4000000>; + riscv,ndev = <186>; + interrupt-controller; + interrupts-extended = <&cpu0_intc 11 + &cpu1_intc 11 &cpu1_intc 9 + &cpu2_intc 11 &cpu2_intc 9 + &cpu3_intc 11 &cpu3_intc 9 + &cpu4_intc 11 &cpu4_intc 9>; + }; + + dma@3000000 { + compatible = "sifive,fu540-c000-pdma"; + reg = <0x0 0x3000000 0x0 0x8000>; + interrupt-parent = <&plic>; + interrupts = <23 24 25 26 27 28 29 30>; + #dma-cells = <1>; + }; + + refclk: refclk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <600000000>; + clock-output-names = "msspllclk"; + }; + + clkcfg: clkcfg@20002000 { + compatible = "microchip,mpfs-clkcfg"; + reg = <0x0 0x20002000 0x0 0x1000>; + reg-names = "mss_sysreg"; + clocks = <&refclk>; + #clock-cells = <1>; + clock-output-names = "cpu", "axi", "ahb", "envm", /* 0-3 */ + "mac0", "mac1", "mmc", "timer", /* 4-7 */ + "mmuart0", "mmuart1", "mmuart2", "mmuart3", /* 8-11 */ + "mmuart4", "spi0", "spi1", "i2c0", /* 12-15 */ + "i2c1", "can0", "can1", "usb", /* 16-19 */ + "rsvd", "rtc", "qspi", "gpio0", /* 20-23 */ + "gpio1", "gpio2", "ddrc", "fic0", /* 24-27 */ + "fic1", "fic2", "fic3", "athena", "cfm"; /* 28-32 */ + }; + + serial0: serial@20000000 { + compatible = "ns16550a"; + reg = <0x0 0x20000000 0x0 0x400>; + reg-io-width = <4>; + reg-shift = <2>; + interrupt-parent = <&plic>; + interrupts = <90>; + current-speed = <115200>; + clocks = <&clkcfg 8>; + status = "disabled"; + }; + + serial1: serial@20100000 { + compatible = "ns16550a"; + reg = <0x0 0x20100000 0x0 0x400>; + reg-io-width = <4>; + reg-shift = <2>; + interrupt-parent = <&plic>; + interrupts = <91>; + current-speed = <115200>; + clocks = <&clkcfg 9>; + status = "disabled"; + }; + + serial2: serial@20102000 { + compatible = "ns16550a"; + reg = <0x0 0x20102000 0x0 0x400>; + reg-io-width = <4>; + reg-shift = <2>; + interrupt-parent = <&plic>; + interrupts = <92>; + current-speed = <115200>; + clocks = <&clkcfg 10>; + status = "disabled"; + }; + + serial3: serial@20104000 { + compatible = "ns16550a"; + reg = <0x0 0x20104000 0x0 0x400>; + reg-io-width = <4>; + reg-shift = <2>; + interrupt-parent = <&plic>; + interrupts = <93>; + current-speed = <115200>; + clocks = <&clkcfg 11>; + status = "disabled"; + }; + + emmc: mmc@20008000 { + compatible = "cdns,sd4hc"; + reg = <0x0 0x20008000 0x0 0x1000>; + interrupt-parent = <&plic>; + interrupts = <88 89>; + pinctrl-names = "default"; + clocks = <&clkcfg 6>; + bus-width = <4>; + cap-mmc-highspeed; + mmc-ddr-3_3v; + max-frequency = <200000000>; + non-removable; + no-sd; + no-sdio; + voltage-ranges = <3300 3300>; + status = "disabled"; + }; + + sdcard: sdhc@20008000 { + compatible = "cdns,sd4hc"; + reg 
= <0x0 0x20008000 0x0 0x1000>; + interrupt-parent = <&plic>; + interrupts = <88>; + pinctrl-names = "default"; + clocks = <&clkcfg 6>; + bus-width = <4>; + disable-wp; + cap-sd-highspeed; + card-detect-delay = <200>; + sd-uhs-sdr12; + sd-uhs-sdr25; + sd-uhs-sdr50; + sd-uhs-sdr104; + max-frequency = <200000000>; + status = "disabled"; + }; + + emac0: ethernet@20110000 { + compatible = "cdns,macb"; + reg = <0x0 0x20110000 0x0 0x2000>; + interrupt-parent = <&plic>; + interrupts = <64 65 66 67>; + local-mac-address = [00 00 00 00 00 00]; + clocks = <&clkcfg 4>, <&clkcfg 2>; + clock-names = "pclk", "hclk"; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + emac1: ethernet@20112000 { + compatible = "cdns,macb"; + reg = <0x0 0x20112000 0x0 0x2000>; + interrupt-parent = <&plic>; + interrupts = <70 71 72 73>; + mac-address = [00 00 00 00 00 00]; + clocks = <&clkcfg 5>, <&clkcfg 2>; + status = "disabled"; + clock-names = "pclk", "hclk"; + #address-cells = <1>; + #size-cells = <0>; + }; + + }; +}; diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi index eeb4f8c3e0e7..8eef82e4199f 100644 --- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi +++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi @@ -159,6 +159,7 @@ reg = <0x0 0x10000000 0x0 0x1000>; clocks = <&hfclk>, <&rtcclk>; #clock-cells = <1>; + #reset-cells = <1>; }; uart0: serial@10010000 { compatible = "sifive,fu740-c000-uart", "sifive,uart0"; @@ -289,5 +290,37 @@ clocks = <&prci PRCI_CLK_PCLK>; status = "disabled"; }; + pcie@e00000000 { + compatible = "sifive,fu740-pcie"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0xe 0x00000000 0x0 0x80000000>, + <0xd 0xf0000000 0x0 0x10000000>, + <0x0 0x100d0000 0x0 0x1000>; + reg-names = "dbi", "config", "mgmt"; + device_type = "pci"; + dma-coherent; + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x60080000 0x0 0x60080000 0x0 0x10000>, /* I/O */ + <0x82000000 0x0 0x60090000 0x0 0x60090000 0x0 0xff70000>, /* mem */ + <0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x1000000>, /* mem */ + <0xc3000000 0x20 0x00000000 0x20 0x00000000 0x20 0x00000000>; /* mem prefetchable */ + num-lanes = <0x8>; + interrupts = <56>, <57>, <58>, <59>, <60>, <61>, <62>, <63>, <64>; + interrupt-names = "msi", "inta", "intb", "intc", "intd"; + interrupt-parent = <&plic0>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &plic0 57>, + <0x0 0x0 0x0 0x2 &plic0 58>, + <0x0 0x0 0x0 0x3 &plic0 59>, + <0x0 0x0 0x0 0x4 &plic0 60>; + clock-names = "pcie_aux"; + clocks = <&prci PRCI_CLK_PCIE_AUX>; + pwren-gpios = <&gpio 5 0>; + reset-gpios = <&gpio 8 0>; + resets = <&prci 4>; + status = "okay"; + }; }; }; diff --git a/arch/riscv/boot/loader.lds.S b/arch/riscv/boot/loader.lds.S index 47a5003c2e28..62d94696a19c 100644 --- a/arch/riscv/boot/loader.lds.S +++ b/arch/riscv/boot/loader.lds.S @@ -1,13 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <asm/page.h> +#include <asm/pgtable.h> OUTPUT_ARCH(riscv) ENTRY(_start) SECTIONS { - . = PAGE_OFFSET; + . 
= KERNEL_LINK_ADDR; .payload : { *(.payload) diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 6c0625aa96c7..1f2be234b11c 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -16,6 +16,7 @@ CONFIG_EXPERT=y CONFIG_BPF_SYSCALL=y CONFIG_SOC_SIFIVE=y CONFIG_SOC_VIRT=y +CONFIG_SOC_MICROCHIP_POLARFIRE=y CONFIG_SMP=y CONFIG_HOTPLUG_CPU=y CONFIG_JUMP_LABEL=y @@ -82,6 +83,9 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_STORAGE=y CONFIG_USB_UAS=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_CADENCE=y CONFIG_MMC=y CONFIG_MMC_SPI=y CONFIG_RTC_CLASS=y diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile new file mode 100644 index 000000000000..b8f8740a3e44 --- /dev/null +++ b/arch/riscv/errata/Makefile @@ -0,0 +1,2 @@ +obj-y += alternative.o +obj-$(CONFIG_ERRATA_SIFIVE) += sifive/ diff --git a/arch/riscv/errata/alternative.c b/arch/riscv/errata/alternative.c new file mode 100644 index 000000000000..3b15885db70b --- /dev/null +++ b/arch/riscv/errata/alternative.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * alternative runtime patching + * inspired by the ARM64 and x86 version + * + * Copyright (C) 2021 Sifive. + */ + +#include <linux/init.h> +#include <linux/cpu.h> +#include <linux/uaccess.h> +#include <asm/alternative.h> +#include <asm/sections.h> +#include <asm/vendorid_list.h> +#include <asm/sbi.h> +#include <asm/csr.h> + +static struct cpu_manufacturer_info_t { + unsigned long vendor_id; + unsigned long arch_id; + unsigned long imp_id; +} cpu_mfr_info; + +static void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end, + unsigned long archid, unsigned long impid); + +static inline void __init riscv_fill_cpu_mfr_info(void) +{ +#ifdef CONFIG_RISCV_M_MODE + cpu_mfr_info.vendor_id = csr_read(CSR_MVENDORID); + cpu_mfr_info.arch_id = csr_read(CSR_MARCHID); + cpu_mfr_info.imp_id = csr_read(CSR_MIMPID); +#else + cpu_mfr_info.vendor_id = sbi_get_mvendorid(); + cpu_mfr_info.arch_id = sbi_get_marchid(); + cpu_mfr_info.imp_id = sbi_get_mimpid(); +#endif +} + +static void __init init_alternative(void) +{ + riscv_fill_cpu_mfr_info(); + + switch (cpu_mfr_info.vendor_id) { +#ifdef CONFIG_ERRATA_SIFIVE + case SIFIVE_VENDOR_ID: + vendor_patch_func = sifive_errata_patch_func; + break; +#endif + default: + vendor_patch_func = NULL; + } +} + +/* + * This is called very early in the boot process (directly after we run + * a feature detect on the boot CPU). No need to worry about other CPUs + * here. + */ +void __init apply_boot_alternatives(void) +{ + /* If called on non-boot cpu things could go wrong */ + WARN_ON(smp_processor_id() != 0); + + init_alternative(); + + if (!vendor_patch_func) + return; + + vendor_patch_func((struct alt_entry *)__alt_start, + (struct alt_entry *)__alt_end, + cpu_mfr_info.arch_id, cpu_mfr_info.imp_id); +} + diff --git a/arch/riscv/errata/sifive/Makefile b/arch/riscv/errata/sifive/Makefile new file mode 100644 index 000000000000..bdd5fc843b8e --- /dev/null +++ b/arch/riscv/errata/sifive/Makefile @@ -0,0 +1,2 @@ +obj-y += errata_cip_453.o +obj-y += errata.o diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c new file mode 100644 index 000000000000..f5e5ae70e829 --- /dev/null +++ b/arch/riscv/errata/sifive/errata.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Sifive. 
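init_alternative() above is the whole vendor hook: read mvendorid/marchid/mimpid (from CSRs when running in M-mode, via SBI otherwise), then select a per-vendor patch function, which apply_boot_alternatives() runs exactly once on the boot CPU. A hedged sketch of what a second vendor would add; everything named "acme" is hypothetical, only struct alt_entry and patch_text_nosync() come from this series:

#include <linux/init.h>
#include <asm/alternative.h>
#include <asm/patch.h>

#define ACME_VENDOR_ID  0x5a5   /* hypothetical mvendorid value */

void __init acme_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
                                   unsigned long archid, unsigned long impid)
{
        struct alt_entry *alt;

        for (alt = begin; alt < end; alt++) {
                if (alt->vendor_id != ACME_VENDOR_ID)
                        continue;
                /* a real implementation would gate this on archid/impid
                 * per alt->errata_id, as the SiFive code below does */
                patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
        }
}

The function would then be wired into the switch in init_alternative() next to the SIFIVE_VENDOR_ID case, guarded by its own CONFIG_ERRATA_* option.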
+ */ + +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/bug.h> +#include <asm/patch.h> +#include <asm/alternative.h> +#include <asm/vendorid_list.h> +#include <asm/errata_list.h> + +struct errata_info_t { + char name[ERRATA_STRING_LENGTH_MAX]; + bool (*check_func)(unsigned long arch_id, unsigned long impid); +}; + +static bool errata_cip_453_check_func(unsigned long arch_id, unsigned long impid) +{ + /* + * Affected cores: + * Architecture ID: 0x8000000000000007 + * Implement ID: 0x20181004 <= impid <= 0x20191105 + */ + if (arch_id != 0x8000000000000007 || + (impid < 0x20181004 || impid > 0x20191105)) + return false; + return true; +} + +static bool errata_cip_1200_check_func(unsigned long arch_id, unsigned long impid) +{ + /* + * Affected cores: + * Architecture ID: 0x8000000000000007 or 0x1 + * Implement ID: mimpid[23:0] <= 0x200630 and mimpid != 0x01200626 + */ + if (arch_id != 0x8000000000000007 && arch_id != 0x1) + return false; + if ((impid & 0xffffff) > 0x200630 || impid == 0x1200626) + return false; + return true; +} + +static struct errata_info_t errata_list[ERRATA_SIFIVE_NUMBER] = { + { + .name = "cip-453", + .check_func = errata_cip_453_check_func + }, + { + .name = "cip-1200", + .check_func = errata_cip_1200_check_func + }, +}; + +static u32 __init sifive_errata_probe(unsigned long archid, unsigned long impid) +{ + int idx; + u32 cpu_req_errata = 0; + + for (idx = 0; idx < ERRATA_SIFIVE_NUMBER; idx++) + if (errata_list[idx].check_func(archid, impid)) + cpu_req_errata |= (1U << idx); + + return cpu_req_errata; +} + +static void __init warn_miss_errata(u32 miss_errata) +{ + int i; + + pr_warn("----------------------------------------------------------------\n"); + pr_warn("WARNING: Missing the following errata may cause potential issues\n"); + for (i = 0; i < ERRATA_SIFIVE_NUMBER; i++) + if (miss_errata & 0x1 << i) + pr_warn("\tSiFive Errata[%d]:%s\n", i, errata_list[i].name); + pr_warn("Please enable the corresponding Kconfig to apply them\n"); + pr_warn("----------------------------------------------------------------\n"); +} + +void __init sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + unsigned long archid, unsigned long impid) +{ + struct alt_entry *alt; + u32 cpu_req_errata = sifive_errata_probe(archid, impid); + u32 cpu_apply_errata = 0; + u32 tmp; + + for (alt = begin; alt < end; alt++) { + if (alt->vendor_id != SIFIVE_VENDOR_ID) + continue; + if (alt->errata_id >= ERRATA_SIFIVE_NUMBER) { + WARN(1, "This errata id:%d is not in kernel errata list", alt->errata_id); + continue; + } + + tmp = (1U << alt->errata_id); + if (cpu_req_errata & tmp) { + patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len); + cpu_apply_errata |= tmp; + } + } + if (cpu_apply_errata != cpu_req_errata) + warn_miss_errata(cpu_req_errata - cpu_apply_errata); +} diff --git a/arch/riscv/errata/sifive/errata_cip_453.S b/arch/riscv/errata/sifive/errata_cip_453.S new file mode 100644 index 000000000000..f1b9623fe1de --- /dev/null +++ b/arch/riscv/errata/sifive/errata_cip_453.S @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 SiFive + */ + +#include <linux/linkage.h> +#include <asm/asm.h> +#include <asm/asm-offsets.h> +#include <asm/alternative.h> + +.macro ADD_SIGN_EXT pt_reg badaddr tmp_reg + REG_L \badaddr, PT_BADADDR(\pt_reg) + li \tmp_reg,1 + slli \tmp_reg,\tmp_reg,0x26 + and \tmp_reg,\tmp_reg,\badaddr + beqz \tmp_reg, 1f + li \tmp_reg,-1 + slli \tmp_reg,\tmp_reg,0x27 + or \badaddr,\tmp_reg,\badaddr + REG_S 
\badaddr, PT_BADADDR(\pt_reg) +1: +.endm + +ENTRY(sifive_cip_453_page_fault_trp) + ADD_SIGN_EXT a0, t0, t1 +#ifdef CONFIG_MMU + la t0, do_page_fault +#else + la t0, do_trap_unknown +#endif + jr t0 +END(sifive_cip_453_page_fault_trp) + +ENTRY(sifive_cip_453_insn_fault_trp) + ADD_SIGN_EXT a0, t0, t1 + la t0, do_trap_insn_fault + jr t0 +END(sifive_cip_453_insn_fault_trp) diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h new file mode 100644 index 000000000000..88c08705f64a --- /dev/null +++ b/arch/riscv/include/asm/alternative-macros.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_ALTERNATIVE_MACROS_H +#define __ASM_ALTERNATIVE_MACROS_H + +#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE + +#ifdef __ASSEMBLY__ + +.macro ALT_ENTRY oldptr newptr vendor_id errata_id new_len + RISCV_PTR \oldptr + RISCV_PTR \newptr + REG_ASM \vendor_id + REG_ASM \new_len + .word \errata_id +.endm + +.macro ALT_NEW_CONTENT vendor_id, errata_id, enable = 1, new_c : vararg + .if \enable + .pushsection .alternative, "a" + ALT_ENTRY 886b, 888f, \vendor_id, \errata_id, 889f - 888f + .popsection + .subsection 1 +888 : + \new_c +889 : + .previous + .org . - (889b - 888b) + (887b - 886b) + .org . - (887b - 886b) + (889b - 888b) + .endif +.endm + +.macro __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable +886 : + \old_c +887 : + ALT_NEW_CONTENT \vendor_id, \errata_id, \enable, \new_c +.endm + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k) + +#else /* !__ASSEMBLY__ */ + +#include <asm/asm.h> +#include <linux/stringify.h> + +#define ALT_ENTRY(oldptr, newptr, vendor_id, errata_id, newlen) \ + RISCV_PTR " " oldptr "\n" \ + RISCV_PTR " " newptr "\n" \ + REG_ASM " " vendor_id "\n" \ + REG_ASM " " newlen "\n" \ + ".word " errata_id "\n" + +#define ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c) \ + ".if " __stringify(enable) " == 1\n" \ + ".pushsection .alternative, \"a\"\n" \ + ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \ + ".popsection\n" \ + ".subsection 1\n" \ + "888 :\n" \ + new_c "\n" \ + "889 :\n" \ + ".previous\n" \ + ".org . - (887b - 886b) + (889b - 888b)\n" \ + ".org . - (889b - 888b) + (887b - 886b)\n" \ + ".endif\n" + +#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable) \ + "886 :\n" \ + old_c "\n" \ + "887 :\n" \ + ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c) + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)) + +#endif /* __ASSEMBLY__ */ + +#else /* !CONFIG_RISCV_ERRATA_ALTERNATIVE*/ +#ifdef __ASSEMBLY__ + +.macro __ALTERNATIVE_CFG old_c + \old_c +.endm + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG old_c + +#else /* !__ASSEMBLY__ */ + +#define __ALTERNATIVE_CFG(old_c) \ + old_c "\n" + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG(old_c) + +#endif /* __ASSEMBLY__ */ +#endif /* CONFIG_RISCV_ERRATA_ALTERNATIVE */ +/* + * Usage: + * ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k) + * in the assembly code. Otherwise, + * asm(ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k)); + * + * old_content: The old content which is probably replaced with new content. + * new_content: The new content. + * vendor_id: The CPU vendor ID. 
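The ADD_SIGN_EXT macro above is the CIP-453 fix itself: on affected cores the reported badaddr is not sign-extended from bit 38, the top bit of an Sv39 virtual address, so the trampoline sets bits 63..39 whenever bit 38 is set and only then jumps to the regular fault handler. The same fixup written out in C (a model for readability, not in-tree code):

static unsigned long cip_453_extend_badaddr(unsigned long badaddr)
{
        if (badaddr & (1UL << 38))
                badaddr |= ~0UL << 39;
        return badaddr;
}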
+ * errata_id: The errata ID. + * CONFIG_k: The Kconfig of this errata. When Kconfig is disabled, the old + * content will alwyas be executed. + */ +#define ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k) \ + _ALTERNATIVE_CFG(old_content, new_content, vendor_id, errata_id, CONFIG_k) + +/* + * A vendor wants to replace an old_content, but another vendor has used + * ALTERNATIVE() to patch its customized content at the same location. In + * this case, this vendor can create a new macro ALTERNATIVE_2() based + * on the following sample code and then replace ALTERNATIVE() with + * ALTERNATIVE_2() to append its customized content. + * + * .macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \ + * new_c_2, vendor_id_2, errata_id_2, enable_2 + * 886 : + * \old_c + * 887 : + * ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1 + * ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2 + * .endm + * + * #define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, CONFIG_k_1, \ + * new_c_2, vendor_id_2, errata_id_2, CONFIG_k_2) \ + * __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, IS_ENABLED(CONFIG_k_1), \ + * new_c_2, vendor_id_2, errata_id_2, IS_ENABLED(CONFIG_k_2) \ + * + * #define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \ + * new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) \ + * _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \ + * new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) + * + */ +#endif diff --git a/arch/riscv/include/asm/alternative.h b/arch/riscv/include/asm/alternative.h new file mode 100644 index 000000000000..e625d3cafbed --- /dev/null +++ b/arch/riscv/include/asm/alternative.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Sifive. 
+ */ + +#ifndef __ASM_ALTERNATIVE_H +#define __ASM_ALTERNATIVE_H + +#define ERRATA_STRING_LENGTH_MAX 32 + +#include <asm/alternative-macros.h> + +#ifndef __ASSEMBLY__ + +#include <linux/init.h> +#include <linux/types.h> +#include <linux/stddef.h> +#include <asm/hwcap.h> + +void __init apply_boot_alternatives(void); + +struct alt_entry { + void *old_ptr; /* address of original instruciton or data */ + void *alt_ptr; /* address of replacement instruction or data */ + unsigned long vendor_id; /* cpu vendor id */ + unsigned long alt_len; /* The replacement size */ + unsigned int errata_id; /* The errata id */ +} __packed; + +struct errata_checkfunc_id { + unsigned long vendor_id; + bool (*func)(struct alt_entry *alt); +}; + +void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + unsigned long archid, unsigned long impid); + +#endif +#endif diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h index 9c992a88d858..618d7c5af1a2 100644 --- a/arch/riscv/include/asm/asm.h +++ b/arch/riscv/include/asm/asm.h @@ -23,6 +23,7 @@ #define REG_L __REG_SEL(ld, lw) #define REG_S __REG_SEL(sd, sw) #define REG_SC __REG_SEL(sc.d, sc.w) +#define REG_ASM __REG_SEL(.dword, .word) #define SZREG __REG_SEL(8, 4) #define LGREG __REG_SEL(3, 2) diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index caadfc1d7487..87ac65696871 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -115,6 +115,9 @@ #define CSR_MIP 0x344 #define CSR_PMPCFG0 0x3a0 #define CSR_PMPADDR0 0x3b0 +#define CSR_MVENDORID 0xf11 +#define CSR_MARCHID 0xf12 +#define CSR_MIMPID 0xf13 #define CSR_MHARTID 0xf14 #ifdef CONFIG_RISCV_M_MODE diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h index 5c725e1df58b..f4b490cd0e5d 100644 --- a/arch/riscv/include/asm/elf.h +++ b/arch/riscv/include/asm/elf.h @@ -81,4 +81,10 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp); #endif /* CONFIG_MMU */ +#define ELF_CORE_COPY_REGS(dest, regs) \ +do { \ + *(struct user_regs_struct *)&(dest) = \ + *(struct user_regs_struct *)regs; \ +} while (0); + #endif /* _ASM_RISCV_ELF_H */ diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h new file mode 100644 index 000000000000..5f1046e82d9f --- /dev/null +++ b/arch/riscv/include/asm/errata_list.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Sifive. 
+ */ +#ifndef ASM_ERRATA_LIST_H +#define ASM_ERRATA_LIST_H + +#include <asm/alternative.h> +#include <asm/vendorid_list.h> + +#ifdef CONFIG_ERRATA_SIFIVE +#define ERRATA_SIFIVE_CIP_453 0 +#define ERRATA_SIFIVE_CIP_1200 1 +#define ERRATA_SIFIVE_NUMBER 2 +#endif + +#ifdef __ASSEMBLY__ + +#define ALT_INSN_FAULT(x) \ +ALTERNATIVE(__stringify(RISCV_PTR do_trap_insn_fault), \ + __stringify(RISCV_PTR sifive_cip_453_insn_fault_trp), \ + SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \ + CONFIG_ERRATA_SIFIVE_CIP_453) + +#define ALT_PAGE_FAULT(x) \ +ALTERNATIVE(__stringify(RISCV_PTR do_page_fault), \ + __stringify(RISCV_PTR sifive_cip_453_page_fault_trp), \ + SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \ + CONFIG_ERRATA_SIFIVE_CIP_453) +#else /* !__ASSEMBLY__ */ + +#define ALT_FLUSH_TLB_PAGE(x) \ +asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \ + ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \ + : : "r" (addr) : "memory") + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index 845002cc2e57..04dad3380041 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -13,9 +13,19 @@ #endif #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR +/* + * Clang prior to 13 had "mcount" instead of "_mcount": + * https://reviews.llvm.org/D98881 + */ +#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000 +#define MCOUNT_NAME _mcount +#else +#define MCOUNT_NAME mcount +#endif + #define ARCH_SUPPORTS_FTRACE_OPS 1 #ifndef __ASSEMBLY__ -void _mcount(void); +void MCOUNT_NAME(void); static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; @@ -36,7 +46,7 @@ struct dyn_arch_ftrace { * both auipc and jalr at the same time. */ -#define MCOUNT_ADDR ((unsigned long)_mcount) +#define MCOUNT_ADDR ((unsigned long)MCOUNT_NAME) #define JALR_SIGN_MASK (0x00000800) #define JALR_OFFSET_MASK (0x00000fff) #define AUIPC_OFFSET_MASK (0xfffff000) diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h new file mode 100644 index 000000000000..1e954101906a --- /dev/null +++ b/arch/riscv/include/asm/kexec.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 FORTH-ICS/CARV + * Nick Kossifidis <mick@ics.forth.gr> + */ + +#ifndef _RISCV_KEXEC_H +#define _RISCV_KEXEC_H + +#include <asm/page.h> /* For PAGE_SIZE */ + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) + +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) + +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +/* Reserve a page for the control code buffer */ +#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE + +#define KEXEC_ARCH KEXEC_ARCH_RISCV + +extern void riscv_crash_save_regs(struct pt_regs *newregs); + +static inline void +crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + if (oldregs) + memcpy(newregs, oldregs, sizeof(struct pt_regs)); + else + riscv_crash_save_regs(newregs); +} + + +#define ARCH_HAS_KIMAGE_ARCH + +struct kimage_arch { + unsigned long fdt_addr; +}; + +const extern unsigned char riscv_kexec_relocate[]; +const extern unsigned int riscv_kexec_relocate_size; + +typedef void (*riscv_kexec_method)(unsigned long first_ind_entry, + unsigned long jump_addr, + unsigned long fdt_addr, + unsigned long hartid, + unsigned long va_pa_off); + +extern riscv_kexec_method riscv_kexec_norelocate; + +#endif diff --git 
a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index adc9d26f3d75..6a7761c86ec2 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -90,15 +90,58 @@ typedef struct page *pgtable_t; #ifdef CONFIG_MMU extern unsigned long va_pa_offset; +#ifdef CONFIG_64BIT +extern unsigned long va_kernel_pa_offset; +#endif +#ifdef CONFIG_XIP_KERNEL +extern unsigned long va_kernel_xip_pa_offset; +#endif extern unsigned long pfn_base; #define ARCH_PFN_OFFSET (pfn_base) #else #define va_pa_offset 0 +#ifdef CONFIG_64BIT +#define va_kernel_pa_offset 0 +#endif #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) #endif /* CONFIG_MMU */ -#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset)) -#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset) +extern unsigned long kernel_virt_addr; + +#ifdef CONFIG_64BIT +#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset)) +#ifdef CONFIG_XIP_KERNEL +#define kernel_mapping_pa_to_va(y) ({ \ + unsigned long _y = y; \ + (_y >= CONFIG_PHYS_RAM_BASE) ? \ + (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \ + (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \ + }) +#else +#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset)) +#endif +#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x) + +#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset) +#ifdef CONFIG_XIP_KERNEL +#define kernel_mapping_va_to_pa(y) ({ \ + unsigned long _y = y; \ + (_y < kernel_virt_addr + XIP_OFFSET) ? \ + ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \ + ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \ + }) +#else +#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset) +#endif +#define __va_to_pa_nodebug(x) ({ \ + unsigned long _x = x; \ + (_x < kernel_virt_addr) ? 
\ + linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \ + }) +#else +#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset)) +#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset) +#endif #ifdef CONFIG_DEBUG_VIRTUAL extern phys_addr_t __virt_to_phys(unsigned long x); diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index ebf817c1bdf4..9469f464e71a 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -11,23 +11,38 @@ #include <asm/pgtable-bits.h> -#ifndef __ASSEMBLY__ +#ifndef CONFIG_MMU +#define KERNEL_LINK_ADDR PAGE_OFFSET +#else -/* Page Upper Directory not used in RISC-V */ -#include <asm-generic/pgtable-nopud.h> -#include <asm/page.h> -#include <asm/tlbflush.h> -#include <linux/mm_types.h> +#define ADDRESS_SPACE_END (UL(-1)) -#ifdef CONFIG_MMU +#ifdef CONFIG_64BIT +/* Leave 2GB for kernel and BPF at the end of the address space */ +#define KERNEL_LINK_ADDR (ADDRESS_SPACE_END - SZ_2G + 1) +#else +#define KERNEL_LINK_ADDR PAGE_OFFSET +#endif #define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) #define VMALLOC_END (PAGE_OFFSET - 1) #define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) #define BPF_JIT_REGION_SIZE (SZ_128M) +#ifdef CONFIG_64BIT +/* KASLR should leave at least 128MB for BPF after the kernel */ +#define BPF_JIT_REGION_START PFN_ALIGN((unsigned long)&_end) +#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE) +#else #define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE) #define BPF_JIT_REGION_END (VMALLOC_END) +#endif + +/* Modules always live before the kernel */ +#ifdef CONFIG_64BIT +#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G) +#define MODULES_END (PFN_ALIGN((unsigned long)&_start)) +#endif /* * Roughly size the vmemmap space to be large enough to fit enough @@ -60,12 +75,35 @@ #endif +#ifdef CONFIG_XIP_KERNEL +#define XIP_OFFSET SZ_8M +#endif + +#ifndef __ASSEMBLY__ + +/* Page Upper Directory not used in RISC-V */ +#include <asm-generic/pgtable-nopud.h> +#include <asm/page.h> +#include <asm/tlbflush.h> +#include <linux/mm_types.h> + #ifdef CONFIG_64BIT #include <asm/pgtable-64.h> #else #include <asm/pgtable-32.h> #endif /* CONFIG_64BIT */ +#ifdef CONFIG_XIP_KERNEL +#define XIP_FIXUP(addr) ({ \ + uintptr_t __a = (uintptr_t)(addr); \ + (__a >= CONFIG_XIP_PHYS_ADDR && __a < CONFIG_XIP_PHYS_ADDR + SZ_16M) ? 
\ + __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\ + __a; \ + }) +#else +#define XIP_FIXUP(addr) (addr) +#endif /* CONFIG_XIP_KERNEL */ + #ifdef CONFIG_MMU /* Number of entries in the page global directory */ #define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t)) @@ -484,8 +522,17 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, #define kern_addr_valid(addr) (1) /* FIXME */ -extern void *dtb_early_va; -extern uintptr_t dtb_early_pa; +extern char _start[]; +extern void *_dtb_early_va; +extern uintptr_t _dtb_early_pa; +#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU) +#define dtb_early_va (*(void **)XIP_FIXUP(&_dtb_early_va)) +#define dtb_early_pa (*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa)) +#else +#define dtb_early_va _dtb_early_va +#define dtb_early_pa _dtb_early_pa +#endif /* CONFIG_XIP_KERNEL */ + void setup_bootmem(void); void paging_init(void); void misc_mem_init(void); diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index d7027411dde8..0d42693cb65e 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -97,6 +97,9 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, void sbi_console_putchar(int ch); int sbi_console_getchar(void); +long sbi_get_mvendorid(void); +long sbi_get_marchid(void); +long sbi_get_mimpid(void); void sbi_set_timer(uint64_t stime_value); void sbi_shutdown(void); void sbi_clear_ipi(void); diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h index 1595c5b60cfd..8a303fb1ee3b 100644 --- a/arch/riscv/include/asm/sections.h +++ b/arch/riscv/include/asm/sections.h @@ -11,5 +11,6 @@ extern char _start[]; extern char _start_kernel[]; extern char __init_data_begin[], __init_data_end[]; extern char __init_text_begin[], __init_text_end[]; +extern char __alt_start[], __alt_end[]; #endif /* __ASM_SECTIONS_H */ diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h index 6887b3d9f371..a9c56776fa0e 100644 --- a/arch/riscv/include/asm/set_memory.h +++ b/arch/riscv/include/asm/set_memory.h @@ -17,6 +17,7 @@ int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); int set_memory_rw_nx(unsigned long addr, int numpages); void protect_kernel_text_data(void); +void protect_kernel_linear_mapping_text_rodata(void); #else static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h index df1f7c4cd433..a7d2811f3536 100644 --- a/arch/riscv/include/asm/smp.h +++ b/arch/riscv/include/asm/smp.h @@ -46,7 +46,7 @@ int riscv_hartid_to_cpuid(int hartid); void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out); /* Set custom IPI operations */ -void riscv_set_ipi_ops(struct riscv_ipi_ops *ops); +void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops); /* Clear IPI for current CPU */ void riscv_clear_ipi(void); @@ -92,7 +92,7 @@ static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in, cpumask_set_cpu(boot_cpu_hartid, out); } -static inline void riscv_set_ipi_ops(struct riscv_ipi_ops *ops) +static inline void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops) { } diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h index 5477e7ecb6e1..909049366555 100644 --- a/arch/riscv/include/asm/string.h +++ b/arch/riscv/include/asm/string.h @@ -23,5 +23,10 @@ extern asmlinkage 
void *__memmove(void *, const void *, size_t); #define memcpy(dst, src, len) __memcpy(dst, src, len) #define memset(s, c, n) __memset(s, c, n) #define memmove(dst, src, len) __memmove(dst, src, len) + +#ifndef __NO_FORTIFY +#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ +#endif + #endif #endif /* _ASM_RISCV_STRING_H */ diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 49350c8bd7b0..b933b1583c9f 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -15,7 +15,7 @@ #include <linux/err.h> /* The array of function pointers for syscalls. */ -extern void *sys_call_table[]; +extern void * const sys_call_table[]; /* * Only the low 32 bits of orig_r0 are meaningful, so we return int. diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 394cfbccdcd9..c84218ad7afc 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -9,6 +9,7 @@ #include <linux/mm_types.h> #include <asm/smp.h> +#include <asm/errata_list.h> #ifdef CONFIG_MMU static inline void local_flush_tlb_all(void) @@ -19,7 +20,7 @@ static inline void local_flush_tlb_all(void) /* Flush one page from local TLB */ static inline void local_flush_tlb_page(unsigned long addr) { - __asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"); + ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory")); } #else /* CONFIG_MMU */ #define local_flush_tlb_all() do { } while (0) diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index f944062c9d99..f314ff44c48d 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -375,7 +375,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) extern long strncpy_from_user(char *dest, const char __user *src, long count); -extern long __must_check strlen_user(const char __user *str); extern long __must_check strnlen_user(const char __user *str, long n); extern diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h new file mode 100644 index 000000000000..9d934215b3c8 --- /dev/null +++ b/arch/riscv/include/asm/vendorid_list.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 SiFive + */ +#ifndef ASM_VENDOR_LIST_H +#define ASM_VENDOR_LIST_H + +#define SIFIVE_VENDOR_ID 0x489 + +#endif diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 647a47f5484a..d3081e4d9600 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -10,6 +10,10 @@ CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE) endif CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,) +ifdef CONFIG_KEXEC +AFLAGS_kexec_relocate.o := -mcmodel=medany -mno-relax +endif + extra-y += head.o extra-y += vmlinux.lds @@ -55,6 +59,8 @@ obj-$(CONFIG_SMP) += cpu_ops_sbi.o endif obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o obj-$(CONFIG_KGDB) += kgdb.o +obj-$(CONFIG_KEXEC) += kexec_relocate.o crash_save_regs.o machine_kexec.o +obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o diff --git a/arch/riscv/kernel/crash_dump.c b/arch/riscv/kernel/crash_dump.c new file mode 100644 index 000000000000..86cc0ada5752 --- /dev/null +++ b/arch/riscv/kernel/crash_dump.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This code comes from arch/arm64/kernel/crash_dump.c + * Created by: AKASHI Takahiro <takahiro.akashi@linaro.org> + * Copyright (C) 2017 Linaro Limited 
+ */ + +#include <linux/crash_dump.h> +#include <linux/io.h> + +/** + * copy_oldmem_page() - copy one page from old kernel memory + * @pfn: page frame number to be copied + * @buf: buffer where the copied page is placed + * @csize: number of bytes to copy + * @offset: offset in bytes into the page + * @userbuf: if set, @buf is in a user address space + * + * This function copies one page from old kernel memory into buffer pointed by + * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes + * copied or negative error in case of failure. + */ +ssize_t copy_oldmem_page(unsigned long pfn, char *buf, + size_t csize, unsigned long offset, + int userbuf) +{ + void *vaddr; + + if (!csize) + return 0; + + vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB); + if (!vaddr) + return -ENOMEM; + + if (userbuf) { + if (copy_to_user((char __user *)buf, vaddr + offset, csize)) { + memunmap(vaddr); + return -EFAULT; + } + } else + memcpy(buf, vaddr + offset, csize); + + memunmap(vaddr); + return csize; +} diff --git a/arch/riscv/kernel/crash_save_regs.S b/arch/riscv/kernel/crash_save_regs.S new file mode 100644 index 000000000000..7832fb763aba --- /dev/null +++ b/arch/riscv/kernel/crash_save_regs.S @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 FORTH-ICS/CARV + * Nick Kossifidis <mick@ics.forth.gr> + */ + +#include <asm/asm.h> /* For RISCV_* and REG_* macros */ +#include <asm/csr.h> /* For CSR_* macros */ +#include <asm/asm-offsets.h> /* For offsets on pt_regs */ +#include <linux/linkage.h> /* For SYM_* macros */ + +.section ".text" +SYM_CODE_START(riscv_crash_save_regs) + REG_S ra, PT_RA(a0) /* x1 */ + REG_S sp, PT_SP(a0) /* x2 */ + REG_S gp, PT_GP(a0) /* x3 */ + REG_S tp, PT_TP(a0) /* x4 */ + REG_S t0, PT_T0(a0) /* x5 */ + REG_S t1, PT_T1(a0) /* x6 */ + REG_S t2, PT_T2(a0) /* x7 */ + REG_S s0, PT_S0(a0) /* x8/fp */ + REG_S s1, PT_S1(a0) /* x9 */ + REG_S a0, PT_A0(a0) /* x10 */ + REG_S a1, PT_A1(a0) /* x11 */ + REG_S a2, PT_A2(a0) /* x12 */ + REG_S a3, PT_A3(a0) /* x13 */ + REG_S a4, PT_A4(a0) /* x14 */ + REG_S a5, PT_A5(a0) /* x15 */ + REG_S a6, PT_A6(a0) /* x16 */ + REG_S a7, PT_A7(a0) /* x17 */ + REG_S s2, PT_S2(a0) /* x18 */ + REG_S s3, PT_S3(a0) /* x19 */ + REG_S s4, PT_S4(a0) /* x20 */ + REG_S s5, PT_S5(a0) /* x21 */ + REG_S s6, PT_S6(a0) /* x22 */ + REG_S s7, PT_S7(a0) /* x23 */ + REG_S s8, PT_S8(a0) /* x24 */ + REG_S s9, PT_S9(a0) /* x25 */ + REG_S s10, PT_S10(a0) /* x26 */ + REG_S s11, PT_S11(a0) /* x27 */ + REG_S t3, PT_T3(a0) /* x28 */ + REG_S t4, PT_T4(a0) /* x29 */ + REG_S t5, PT_T5(a0) /* x30 */ + REG_S t6, PT_T6(a0) /* x31 */ + + csrr t1, CSR_STATUS + csrr t2, CSR_EPC + csrr t3, CSR_TVAL + csrr t4, CSR_CAUSE + + REG_S t1, PT_STATUS(a0) + REG_S t2, PT_EPC(a0) + REG_S t3, PT_BADADDR(a0) + REG_S t4, PT_CAUSE(a0) + ret +SYM_CODE_END(riscv_crash_save_regs) diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 83095faa680e..80d5a9e017b0 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -12,6 +12,7 @@ #include <asm/unistd.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> +#include <asm/errata_list.h> #if !IS_ENABLED(CONFIG_PREEMPTION) .set resume_kernel, restore_all @@ -454,7 +455,7 @@ ENDPROC(__switch_to) /* Exception vector table */ ENTRY(excp_vect_table) RISCV_PTR do_trap_insn_misaligned - RISCV_PTR do_trap_insn_fault + ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault) RISCV_PTR do_trap_insn_illegal RISCV_PTR do_trap_break RISCV_PTR do_trap_load_misaligned @@ -465,7 +466,8 @@ 
ENTRY(excp_vect_table) RISCV_PTR do_trap_ecall_s RISCV_PTR do_trap_unknown RISCV_PTR do_trap_ecall_m - RISCV_PTR do_page_fault /* instruction page fault */ + /* instruciton page fault */ + ALT_PAGE_FAULT(RISCV_PTR do_page_fault) RISCV_PTR do_page_fault /* load page fault */ RISCV_PTR do_trap_unknown RISCV_PTR do_page_fault /* store page fault */ diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index f5a9bad86e58..89cc58ab52b4 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -9,11 +9,23 @@ #include <linux/linkage.h> #include <asm/thread_info.h> #include <asm/page.h> +#include <asm/pgtable.h> #include <asm/csr.h> #include <asm/hwcap.h> #include <asm/image.h> #include "efi-header.S" +#ifdef CONFIG_XIP_KERNEL +.macro XIP_FIXUP_OFFSET reg + REG_L t0, _xip_fixup + add \reg, \reg, t0 +.endm +_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET +#else +.macro XIP_FIXUP_OFFSET reg +.endm +#endif /* CONFIG_XIP_KERNEL */ + __HEAD ENTRY(_start) /* @@ -69,7 +81,9 @@ pe_head_start: #ifdef CONFIG_MMU relocate: /* Relocate return address */ - li a1, PAGE_OFFSET + la a1, kernel_virt_addr + XIP_FIXUP_OFFSET a1 + REG_L a1, 0(a1) la a2, _start sub a1, a1, a2 add ra, ra, a1 @@ -91,6 +105,7 @@ relocate: * to ensure the new translations are in use. */ la a0, trampoline_pg_dir + XIP_FIXUP_OFFSET a0 srl a0, a0, PAGE_SHIFT or a0, a0, a1 sfence.vma @@ -144,7 +159,9 @@ secondary_start_sbi: slli a3, a0, LGREG la a4, __cpu_up_stack_pointer + XIP_FIXUP_OFFSET a4 la a5, __cpu_up_task_pointer + XIP_FIXUP_OFFSET a5 add a4, a3, a4 add a5, a3, a5 REG_L sp, (a4) @@ -156,6 +173,7 @@ secondary_start_common: #ifdef CONFIG_MMU /* Enable virtual memory and relocate to virtual address */ la a0, swapper_pg_dir + XIP_FIXUP_OFFSET a0 call relocate #endif call setup_trap_vector @@ -236,12 +254,33 @@ pmp_done: .Lgood_cores: #endif +#ifndef CONFIG_XIP_KERNEL /* Pick one hart to run the main boot sequence */ la a3, hart_lottery li a2, 1 amoadd.w a3, a2, (a3) bnez a3, .Lsecondary_start +#else + /* hart_lottery in flash contains a magic number */ + la a3, hart_lottery + mv a2, a3 + XIP_FIXUP_OFFSET a2 + lw t1, (a3) + amoswap.w t0, t1, (a2) + /* first time here if hart_lottery in RAM is not set */ + beq t0, t1, .Lsecondary_start + + la sp, _end + THREAD_SIZE + XIP_FIXUP_OFFSET sp + mv s0, a0 + call __copy_data + + /* Restore a0 copy */ + mv a0, s0 +#endif + +#ifndef CONFIG_XIP_KERNEL /* Clear BSS for flat non-ELF images */ la a3, __bss_start la a4, __bss_stop @@ -251,15 +290,18 @@ clear_bss: add a3, a3, RISCV_SZPTR blt a3, a4, clear_bss clear_bss_done: - +#endif /* Save hart ID and DTB physical address */ mv s0, a0 mv s1, a1 + la a2, boot_cpu_hartid + XIP_FIXUP_OFFSET a2 REG_S a0, (a2) /* Initialize page tables and relocate to virtual addresses */ la sp, init_thread_union + THREAD_SIZE + XIP_FIXUP_OFFSET sp #ifdef CONFIG_BUILTIN_DTB la a0, __dtb_start #else @@ -268,6 +310,7 @@ clear_bss_done: call setup_vm #ifdef CONFIG_MMU la a0, early_pg_dir + XIP_FIXUP_OFFSET a0 call relocate #endif /* CONFIG_MMU */ @@ -292,7 +335,9 @@ clear_bss_done: slli a3, a0, LGREG la a1, __cpu_up_stack_pointer + XIP_FIXUP_OFFSET a1 la a2, __cpu_up_task_pointer + XIP_FIXUP_OFFSET a2 add a1, a3, a1 add a2, a3, a2 diff --git a/arch/riscv/kernel/head.h b/arch/riscv/kernel/head.h index b48dda3d04f6..aabbc3ac3e48 100644 --- a/arch/riscv/kernel/head.h +++ b/arch/riscv/kernel/head.h @@ -12,6 +12,9 @@ extern atomic_t hart_lottery; asmlinkage void do_page_fault(struct pt_regs *regs); asmlinkage void __init 
setup_vm(uintptr_t dtb_pa); +#ifdef CONFIG_XIP_KERNEL +asmlinkage void __init __copy_data(void); +#endif extern void *__cpu_up_stack_pointer[]; extern void *__cpu_up_task_pointer[]; diff --git a/arch/riscv/kernel/kexec_relocate.S b/arch/riscv/kernel/kexec_relocate.S new file mode 100644 index 000000000000..88c3beabe9b4 --- /dev/null +++ b/arch/riscv/kernel/kexec_relocate.S @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 FORTH-ICS/CARV + * Nick Kossifidis <mick@ics.forth.gr> + */ + +#include <asm/asm.h> /* For RISCV_* and REG_* macros */ +#include <asm/csr.h> /* For CSR_* macros */ +#include <asm/page.h> /* For PAGE_SIZE */ +#include <linux/linkage.h> /* For SYM_* macros */ + +.section ".rodata" +SYM_CODE_START(riscv_kexec_relocate) + + /* + * s0: Pointer to the current entry + * s1: (const) Phys address to jump to after relocation + * s2: (const) Phys address of the FDT image + * s3: (const) The hartid of the current hart + * s4: Pointer to the destination address for the relocation + * s5: (const) Number of words per page + * s6: (const) 1, used for subtraction + * s7: (const) va_pa_offset, used when switching MMU off + * s8: (const) Physical address of the main loop + * s9: (debug) indirection page counter + * s10: (debug) entry counter + * s11: (debug) copied words counter + */ + mv s0, a0 + mv s1, a1 + mv s2, a2 + mv s3, a3 + mv s4, zero + li s5, (PAGE_SIZE / RISCV_SZPTR) + li s6, 1 + mv s7, a4 + mv s8, zero + mv s9, zero + mv s10, zero + mv s11, zero + + /* Disable / cleanup interrupts */ + csrw CSR_SIE, zero + csrw CSR_SIP, zero + + /* + * When we switch SATP.MODE to "Bare" we'll only + * play with physical addresses. However the first time + * we try to jump somewhere, the offset on the jump + * will be relative to pc which will still be on VA. To + * deal with this we set stvec to the physical address at + * the start of the loop below so that we jump there in + * any case. + */ + la s8, 1f + sub s8, s8, s7 + csrw CSR_STVEC, s8 + + /* Process entries in a loop */ +.align 2 +1: + addi s10, s10, 1 + REG_L t0, 0(s0) /* t0 = *image->entry */ + addi s0, s0, RISCV_SZPTR /* image->entry++ */ + + /* IND_DESTINATION entry ? -> save destination address */ + andi t1, t0, 0x1 + beqz t1, 2f + andi s4, t0, ~0x1 + j 1b + +2: + /* IND_INDIRECTION entry ? -> update next entry ptr (PA) */ + andi t1, t0, 0x2 + beqz t1, 2f + andi s0, t0, ~0x2 + addi s9, s9, 1 + csrw CSR_SATP, zero + jalr zero, s8, 0 + +2: + /* IND_DONE entry ? -> jump to done label */ + andi t1, t0, 0x4 + beqz t1, 2f + j 4f + +2: + /* + * IND_SOURCE entry ? -> copy page word by word to the + * destination address we got from IND_DESTINATION + */ + andi t1, t0, 0x8 + beqz t1, 1b /* Unknown entry type, ignore it */ + andi t0, t0, ~0x8 + mv t3, s5 /* i = num words per page */ +3: /* copy loop */ + REG_L t1, (t0) /* t1 = *src_ptr */ + REG_S t1, (s4) /* *dst_ptr = *src_ptr */ + addi t0, t0, RISCV_SZPTR /* stc_ptr++ */ + addi s4, s4, RISCV_SZPTR /* dst_ptr++ */ + sub t3, t3, s6 /* i-- */ + addi s11, s11, 1 /* c++ */ + beqz t3, 1b /* copy done ? 
*/ + j 3b + +4: + /* Pass the arguments to the next kernel / Cleanup*/ + mv a0, s3 + mv a1, s2 + mv a2, s1 + + /* Cleanup */ + mv a3, zero + mv a4, zero + mv a5, zero + mv a6, zero + mv a7, zero + + mv s0, zero + mv s1, zero + mv s2, zero + mv s3, zero + mv s4, zero + mv s5, zero + mv s6, zero + mv s7, zero + mv s8, zero + mv s9, zero + mv s10, zero + mv s11, zero + + mv t0, zero + mv t1, zero + mv t2, zero + mv t3, zero + mv t4, zero + mv t5, zero + mv t6, zero + csrw CSR_SEPC, zero + csrw CSR_SCAUSE, zero + csrw CSR_SSCRATCH, zero + + /* + * Make sure the relocated code is visible + * and jump to the new kernel + */ + fence.i + + jalr zero, a2, 0 + +SYM_CODE_END(riscv_kexec_relocate) +riscv_kexec_relocate_end: + + +/* Used for jumping to crashkernel */ +.section ".text" +SYM_CODE_START(riscv_kexec_norelocate) + /* + * s0: (const) Phys address to jump to + * s1: (const) Phys address of the FDT image + * s2: (const) The hartid of the current hart + * s3: (const) va_pa_offset, used when switching MMU off + */ + mv s0, a1 + mv s1, a2 + mv s2, a3 + mv s3, a4 + + /* Disable / cleanup interrupts */ + csrw CSR_SIE, zero + csrw CSR_SIP, zero + + /* Switch to physical addressing */ + la s4, 1f + sub s4, s4, s3 + csrw CSR_STVEC, s4 + csrw CSR_SATP, zero + +.align 2 +1: + /* Pass the arguments to the next kernel / Cleanup*/ + mv a0, s2 + mv a1, s1 + mv a2, s0 + + /* Cleanup */ + mv a3, zero + mv a4, zero + mv a5, zero + mv a6, zero + mv a7, zero + + mv s0, zero + mv s1, zero + mv s2, zero + mv s3, zero + mv s4, zero + mv s5, zero + mv s6, zero + mv s7, zero + mv s8, zero + mv s9, zero + mv s10, zero + mv s11, zero + + mv t0, zero + mv t1, zero + mv t2, zero + mv t3, zero + mv t4, zero + mv t5, zero + mv t6, zero + csrw CSR_SEPC, zero + csrw CSR_SCAUSE, zero + csrw CSR_SSCRATCH, zero + + jalr zero, a2, 0 +SYM_CODE_END(riscv_kexec_norelocate) + +.section ".rodata" +SYM_DATA(riscv_kexec_relocate_size, + .long riscv_kexec_relocate_end - riscv_kexec_relocate) + diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c new file mode 100644 index 000000000000..cc048143fba5 --- /dev/null +++ b/arch/riscv/kernel/machine_kexec.c @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 FORTH-ICS/CARV + * Nick Kossifidis <mick@ics.forth.gr> + */ + +#include <linux/kexec.h> +#include <asm/kexec.h> /* For riscv_kexec_* symbol defines */ +#include <linux/smp.h> /* For smp_send_stop () */ +#include <asm/cacheflush.h> /* For local_flush_icache_all() */ +#include <asm/barrier.h> /* For smp_wmb() */ +#include <asm/page.h> /* For PAGE_MASK */ +#include <linux/libfdt.h> /* For fdt_check_header() */ +#include <asm/set_memory.h> /* For set_memory_x() */ +#include <linux/compiler.h> /* For unreachable() */ +#include <linux/cpu.h> /* For cpu_down() */ + +/** + * kexec_image_info - Print received image details + */ +static void +kexec_image_info(const struct kimage *image) +{ + unsigned long i; + + pr_debug("Kexec image info:\n"); + pr_debug("\ttype: %d\n", image->type); + pr_debug("\tstart: %lx\n", image->start); + pr_debug("\thead: %lx\n", image->head); + pr_debug("\tnr_segments: %lu\n", image->nr_segments); + + for (i = 0; i < image->nr_segments; i++) { + pr_debug("\t segment[%lu]: %016lx - %016lx", i, + image->segment[i].mem, + image->segment[i].mem + image->segment[i].memsz); + pr_debug("\t\t0x%lx bytes, %lu pages\n", + (unsigned long) image->segment[i].memsz, + (unsigned long) image->segment[i].memsz / PAGE_SIZE); + } +} + +/** + * machine_kexec_prepare - Initialize 
kexec + * + * This function is called from do_kexec_load, when the user has + * provided us with an image to be loaded. Its goal is to validate + * the image and prepare the control code buffer as needed. + * Note that kimage_alloc_init has already been called and the + * control buffer has already been allocated. + */ +int +machine_kexec_prepare(struct kimage *image) +{ + struct kimage_arch *internal = &image->arch; + struct fdt_header fdt = {0}; + void *control_code_buffer = NULL; + unsigned int control_code_buffer_sz = 0; + int i = 0; + + kexec_image_info(image); + + /* Find the Flattened Device Tree and save its physical address */ + for (i = 0; i < image->nr_segments; i++) { + if (image->segment[i].memsz <= sizeof(fdt)) + continue; + + if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt))) + continue; + + if (fdt_check_header(&fdt)) + continue; + + internal->fdt_addr = (unsigned long) image->segment[i].mem; + break; + } + + if (!internal->fdt_addr) { + pr_err("Device tree not included in the provided image\n"); + return -EINVAL; + } + + /* Copy the assembler code for relocation to the control page */ + if (image->type != KEXEC_TYPE_CRASH) { + control_code_buffer = page_address(image->control_code_page); + control_code_buffer_sz = page_size(image->control_code_page); + + if (unlikely(riscv_kexec_relocate_size > control_code_buffer_sz)) { + pr_err("Relocation code doesn't fit within a control page\n"); + return -EINVAL; + } + + memcpy(control_code_buffer, riscv_kexec_relocate, + riscv_kexec_relocate_size); + + /* Mark the control page executable */ + set_memory_x((unsigned long) control_code_buffer, 1); + } + + return 0; +} + + +/** + * machine_kexec_cleanup - Cleanup any leftovers from + * machine_kexec_prepare + * + * This function is called by kimage_free to handle any arch-specific + * allocations done on machine_kexec_prepare. Since we didn't do any + * allocations there, this is just an empty function. Note that the + * control buffer is freed by kimage_free. + */ +void +machine_kexec_cleanup(struct kimage *image) +{ +} + + +/* + * machine_shutdown - Prepare for a kexec reboot + * + * This function is called by kernel_kexec just before machine_kexec + * below. Its goal is to prepare the rest of the system (the other + * harts and possibly devices etc) for a kexec reboot. + */ +void machine_shutdown(void) +{ + /* + * No more interrupts on this hart + * until we are back up. + */ + local_irq_disable(); + +#if defined(CONFIG_HOTPLUG_CPU) + smp_shutdown_nonboot_cpus(smp_processor_id()); +#endif +} + +/** + * machine_crash_shutdown - Prepare to kexec after a kernel crash + * + * This function is called by crash_kexec just before machine_kexec + * below and its goal is similar to machine_shutdown, but in case of + * a kernel crash. Since we don't handle such cases yet, this function + * is empty. + */ +void +machine_crash_shutdown(struct pt_regs *regs) +{ + crash_save_cpu(regs, smp_processor_id()); + machine_shutdown(); + pr_info("Starting crashdump kernel...\n"); +} + +/** + * machine_kexec - Jump to the loaded kimage + * + * This function is called by kernel_kexec which is called by the + * reboot system call when the reboot cmd is LINUX_REBOOT_CMD_KEXEC, + * or by crash_kernel which is called by the kernel's arch-specific + * trap handler in case of a kernel panic. It's the final stage of + * the kexec process where the pre-loaded kimage is ready to be + * executed. We assume at this point that all other harts are + * suspended and this hart will be the new boot hart. 
+ */ +void __noreturn +machine_kexec(struct kimage *image) +{ + struct kimage_arch *internal = &image->arch; + unsigned long jump_addr = (unsigned long) image->start; + unsigned long first_ind_entry = (unsigned long) &image->head; + unsigned long this_hart_id = raw_smp_processor_id(); + unsigned long fdt_addr = internal->fdt_addr; + void *control_code_buffer = page_address(image->control_code_page); + riscv_kexec_method kexec_method = NULL; + + if (image->type != KEXEC_TYPE_CRASH) + kexec_method = control_code_buffer; + else + kexec_method = (riscv_kexec_method) &riscv_kexec_norelocate; + + pr_notice("Will call new kernel at %08lx from hart id %lx\n", + jump_addr, this_hart_id); + pr_notice("FDT image at %08lx\n", fdt_addr); + + /* Make sure the relocation code is visible to the hart */ + local_flush_icache_all(); + + /* Jump to the relocation code */ + pr_notice("Bye...\n"); + kexec_method(first_ind_entry, jump_addr, fdt_addr, + this_hart_id, va_pa_offset); + unreachable(); +} diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S index 8a5593ff9ff3..6d462681c9c0 100644 --- a/arch/riscv/kernel/mcount.S +++ b/arch/riscv/kernel/mcount.S @@ -47,8 +47,8 @@ ENTRY(ftrace_stub) #ifdef CONFIG_DYNAMIC_FTRACE - .global _mcount - .set _mcount, ftrace_stub + .global MCOUNT_NAME + .set MCOUNT_NAME, ftrace_stub #endif ret ENDPROC(ftrace_stub) @@ -78,7 +78,7 @@ ENDPROC(return_to_handler) #endif #ifndef CONFIG_DYNAMIC_FTRACE -ENTRY(_mcount) +ENTRY(MCOUNT_NAME) la t4, ftrace_stub #ifdef CONFIG_FUNCTION_GRAPH_TRACER la t0, ftrace_graph_return @@ -124,6 +124,6 @@ do_trace: jalr t5 RESTORE_ABI_STATE ret -ENDPROC(_mcount) +ENDPROC(MCOUNT_NAME) #endif -EXPORT_SYMBOL(_mcount) +EXPORT_SYMBOL(MCOUNT_NAME) diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 104fba889cf7..68a9e3d1fe16 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -408,13 +408,11 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, } #if defined(CONFIG_MMU) && defined(CONFIG_64BIT) -#define VMALLOC_MODULE_START \ - max(PFN_ALIGN((unsigned long)&_end - SZ_2G), VMALLOC_START) void *module_alloc(unsigned long size) { - return __vmalloc_node_range(size, 1, VMALLOC_MODULE_START, - VMALLOC_END, GFP_KERNEL, - PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, + return __vmalloc_node_range(size, 1, MODULES_VADDR, + MODULES_END, GFP_KERNEL, + PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0)); } #endif diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c index 7e2c78e2ca6b..10b965c34536 100644 --- a/arch/riscv/kernel/probes/kprobes.c +++ b/arch/riscv/kernel/probes/kprobes.c @@ -84,6 +84,14 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) return 0; } +void *alloc_insn_page(void) +{ + return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END, + GFP_KERNEL, PAGE_KERNEL_READ_EXEC, + VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, + __builtin_return_address(0)); +} + /* install breakpoint in text */ void __kprobes arch_arm_kprobe(struct kprobe *p) { @@ -260,8 +268,10 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr) if (kcb->kprobe_status == KPROBE_REENTER) restore_previous_kprobe(kcb); - else + else { + kprobes_restore_local_irqflag(kcb, regs); reset_current_kprobe(); + } break; case KPROBE_HIT_ACTIVE: diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index d3bf756321a5..7402a417f38e 100644 --- a/arch/riscv/kernel/sbi.c +++ b/arch/riscv/kernel/sbi.c @@ -11,14 +11,14 @@ #include <asm/smp.h> /* default SBI version 
is 0.1 */ -unsigned long sbi_spec_version = SBI_SPEC_VERSION_DEFAULT; +unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT; EXPORT_SYMBOL(sbi_spec_version); -static void (*__sbi_set_timer)(uint64_t stime); -static int (*__sbi_send_ipi)(const unsigned long *hart_mask); +static void (*__sbi_set_timer)(uint64_t stime) __ro_after_init; +static int (*__sbi_send_ipi)(const unsigned long *hart_mask) __ro_after_init; static int (*__sbi_rfence)(int fid, const unsigned long *hart_mask, unsigned long start, unsigned long size, - unsigned long arg4, unsigned long arg5); + unsigned long arg4, unsigned long arg5) __ro_after_init; struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, unsigned long arg1, unsigned long arg2, @@ -547,6 +547,21 @@ static inline long sbi_get_firmware_version(void) return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION); } +long sbi_get_mvendorid(void) +{ + return __sbi_base_ecall(SBI_EXT_BASE_GET_MVENDORID); +} + +long sbi_get_marchid(void) +{ + return __sbi_base_ecall(SBI_EXT_BASE_GET_MARCHID); +} + +long sbi_get_mimpid(void) +{ + return __sbi_base_ecall(SBI_EXT_BASE_GET_MIMPID); +} + static void sbi_send_cpumask_ipi(const struct cpumask *target) { struct cpumask hartid_mask; @@ -556,7 +571,7 @@ static void sbi_send_cpumask_ipi(const struct cpumask *target) sbi_send_ipi(cpumask_bits(&hartid_mask)); } -static struct riscv_ipi_ops sbi_ipi_ops = { +static const struct riscv_ipi_ops sbi_ipi_ops = { .ipi_inject = sbi_send_cpumask_ipi }; @@ -577,19 +592,19 @@ void __init sbi_init(void) sbi_get_firmware_id(), sbi_get_firmware_version()); if (sbi_probe_extension(SBI_EXT_TIME) > 0) { __sbi_set_timer = __sbi_set_timer_v02; - pr_info("SBI v0.2 TIME extension detected\n"); + pr_info("SBI TIME extension detected\n"); } else { __sbi_set_timer = __sbi_set_timer_v01; } if (sbi_probe_extension(SBI_EXT_IPI) > 0) { __sbi_send_ipi = __sbi_send_ipi_v02; - pr_info("SBI v0.2 IPI extension detected\n"); + pr_info("SBI IPI extension detected\n"); } else { __sbi_send_ipi = __sbi_send_ipi_v01; } if (sbi_probe_extension(SBI_EXT_RFENCE) > 0) { __sbi_rfence = __sbi_rfence_v02; - pr_info("SBI v0.2 RFENCE extension detected\n"); + pr_info("SBI RFENCE extension detected\n"); } else { __sbi_rfence = __sbi_rfence_v01; } diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index f8f15332caa2..7b31779101f6 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -20,9 +20,11 @@ #include <linux/swiotlb.h> #include <linux/smp.h> #include <linux/efi.h> +#include <linux/crash_dump.h> #include <asm/cpu_ops.h> #include <asm/early_ioremap.h> +#include <asm/pgtable.h> #include <asm/setup.h> #include <asm/set_memory.h> #include <asm/sections.h> @@ -50,7 +52,11 @@ struct screen_info screen_info __section(".data") = { * This is used before the kernel initializes the BSS so it can't be in the * BSS. */ -atomic_t hart_lottery __section(".sdata"); +atomic_t hart_lottery __section(".sdata") +#ifdef CONFIG_XIP_KERNEL += ATOMIC_INIT(0xC001BEEF) +#endif +; unsigned long boot_cpu_hartid; static DEFINE_PER_CPU(struct cpu, cpu_devices); @@ -60,10 +66,14 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); * also add "System RAM" regions for compatibility with other * archs, and the rest of the known regions for completeness. 
*/ +static struct resource kimage_res = { .name = "Kernel image", }; static struct resource code_res = { .name = "Kernel code", }; static struct resource data_res = { .name = "Kernel data", }; static struct resource rodata_res = { .name = "Kernel rodata", }; static struct resource bss_res = { .name = "Kernel bss", }; +#ifdef CONFIG_CRASH_DUMP +static struct resource elfcorehdr_res = { .name = "ELF Core hdr", }; +#endif static int __init add_resource(struct resource *parent, struct resource *res) @@ -80,45 +90,54 @@ static int __init add_resource(struct resource *parent, return 1; } -static int __init add_kernel_resources(struct resource *res) +static int __init add_kernel_resources(void) { int ret = 0; /* * The memory region of the kernel image is continuous and - * was reserved on setup_bootmem, find it here and register - * it as a resource, then register the various segments of - * the image as child nodes + * was reserved on setup_bootmem, register it here as a + * resource, with the various segments of the image as + * child nodes. */ - if (!(res->start <= code_res.start && res->end >= data_res.end)) - return 0; - res->name = "Kernel image"; - res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + code_res.start = __pa_symbol(_text); + code_res.end = __pa_symbol(_etext) - 1; + code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - /* - * We removed a part of this region on setup_bootmem so - * we need to expand the resource for the bss to fit in. - */ - res->end = bss_res.end; + rodata_res.start = __pa_symbol(__start_rodata); + rodata_res.end = __pa_symbol(__end_rodata) - 1; + rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + + data_res.start = __pa_symbol(_data); + data_res.end = __pa_symbol(_edata) - 1; + data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + + bss_res.start = __pa_symbol(__bss_start); + bss_res.end = __pa_symbol(__bss_stop) - 1; + bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + + kimage_res.start = code_res.start; + kimage_res.end = bss_res.end; + kimage_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - ret = add_resource(&iomem_resource, res); + ret = add_resource(&iomem_resource, &kimage_res); if (ret < 0) return ret; - ret = add_resource(res, &code_res); + ret = add_resource(&kimage_res, &code_res); if (ret < 0) return ret; - ret = add_resource(res, &rodata_res); + ret = add_resource(&kimage_res, &rodata_res); if (ret < 0) return ret; - ret = add_resource(res, &data_res); + ret = add_resource(&kimage_res, &data_res); if (ret < 0) return ret; - ret = add_resource(res, &bss_res); + ret = add_resource(&kimage_res, &bss_res); return ret; } @@ -129,54 +148,59 @@ static void __init init_resources(void) struct resource *res = NULL; struct resource *mem_res = NULL; size_t mem_res_sz = 0; - int ret = 0, i = 0; - - code_res.start = __pa_symbol(_text); - code_res.end = __pa_symbol(_etext) - 1; - code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - - rodata_res.start = __pa_symbol(__start_rodata); - rodata_res.end = __pa_symbol(__end_rodata) - 1; - rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - - data_res.start = __pa_symbol(_data); - data_res.end = __pa_symbol(_edata) - 1; - data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - - bss_res.start = __pa_symbol(__bss_start); - bss_res.end = __pa_symbol(__bss_stop) - 1; - bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + int num_resources = 0, res_idx = 0; + int ret = 0; /* + 1 as memblock_alloc() might increase memblock.reserved.cnt */ - mem_res_sz = 
(memblock.memory.cnt + memblock.reserved.cnt + 1) * sizeof(*mem_res); + num_resources = memblock.memory.cnt + memblock.reserved.cnt + 1; + res_idx = num_resources - 1; + + mem_res_sz = num_resources * sizeof(*mem_res); mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES); if (!mem_res) panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz); + /* * Start by adding the reserved regions, if they overlap * with /memory regions, insert_resource later on will take * care of it. */ + ret = add_kernel_resources(); + if (ret < 0) + goto error; + +#ifdef CONFIG_KEXEC_CORE + if (crashk_res.start != crashk_res.end) { + ret = add_resource(&iomem_resource, &crashk_res); + if (ret < 0) + goto error; + } +#endif + +#ifdef CONFIG_CRASH_DUMP + if (elfcorehdr_size > 0) { + elfcorehdr_res.start = elfcorehdr_addr; + elfcorehdr_res.end = elfcorehdr_addr + elfcorehdr_size - 1; + elfcorehdr_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + add_resource(&iomem_resource, &elfcorehdr_res); + } +#endif + for_each_reserved_mem_region(region) { - res = &mem_res[i++]; + res = &mem_res[res_idx--]; res->name = "Reserved"; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region)); res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1; - ret = add_kernel_resources(res); - if (ret < 0) - goto error; - else if (ret) - continue; - /* * Ignore any other reserved regions within * system memory. */ if (memblock_is_memory(res->start)) { - memblock_free((phys_addr_t) res, sizeof(struct resource)); + /* Re-use this pre-allocated resource */ + res_idx++; continue; } @@ -187,7 +211,7 @@ static void __init init_resources(void) /* Add /memory regions to the resource tree */ for_each_mem_region(region) { - res = &mem_res[i++]; + res = &mem_res[res_idx--]; if (unlikely(memblock_is_nomap(region))) { res->name = "Reserved"; @@ -205,6 +229,9 @@ static void __init init_resources(void) goto error; } + /* Clean-up any unused pre-allocated resources */ + mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res); + memblock_free((phys_addr_t) mem_res, mem_res_sz); return; error: @@ -251,21 +278,26 @@ void __init setup_arch(char **cmdline_p) efi_init(); setup_bootmem(); paging_init(); - init_resources(); #if IS_ENABLED(CONFIG_BUILTIN_DTB) unflatten_and_copy_device_tree(); #else - if (early_init_dt_verify(__va(dtb_early_pa))) + if (early_init_dt_verify(__va(XIP_FIXUP(dtb_early_pa)))) unflatten_device_tree(); else pr_err("No DTB found in kernel mappings\n"); #endif misc_mem_init(); + init_resources(); sbi_init(); - if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) + if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) { protect_kernel_text_data(); +#if defined(CONFIG_64BIT) && defined(CONFIG_MMU) && !defined(CONFIG_XIP_KERNEL) + protect_kernel_linear_mapping_text_rodata(); +#endif + } + #ifdef CONFIG_SWIOTLB swiotlb_init(1); #endif diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c index ea028d9e0d24..921d9d7df400 100644 --- a/arch/riscv/kernel/smp.c +++ b/arch/riscv/kernel/smp.c @@ -9,6 +9,7 @@ */ #include <linux/cpu.h> +#include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/profile.h> @@ -27,10 +28,11 @@ enum ipi_message_type { IPI_CALL_FUNC, IPI_CPU_STOP, IPI_IRQ_WORK, + IPI_TIMER, IPI_MAX }; -unsigned long __cpuid_to_hartid_map[NR_CPUS] = { +unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = { [0 ... 
NR_CPUS-1] = INVALID_HARTID }; @@ -54,7 +56,7 @@ int riscv_hartid_to_cpuid(int hartid) return i; pr_err("Couldn't find cpu id for hartid [%d]\n", hartid); - return i; + return -ENOENT; } void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out) @@ -85,9 +87,9 @@ static void ipi_stop(void) wait_for_interrupt(); } -static struct riscv_ipi_ops *ipi_ops; +static const struct riscv_ipi_ops *ipi_ops __ro_after_init; -void riscv_set_ipi_ops(struct riscv_ipi_ops *ops) +void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops) { ipi_ops = ops; } @@ -176,6 +178,12 @@ void handle_IPI(struct pt_regs *regs) irq_work_run(); } +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST + if (ops & (1 << IPI_TIMER)) { + stats[IPI_TIMER]++; + tick_receive_broadcast(); + } +#endif BUG_ON((ops >> IPI_MAX) != 0); /* Order data access and bit testing. */ @@ -192,6 +200,7 @@ static const char * const ipi_names[] = { [IPI_CALL_FUNC] = "Function call interrupts", [IPI_CPU_STOP] = "CPU stop interrupts", [IPI_IRQ_WORK] = "IRQ work interrupts", + [IPI_TIMER] = "Timer broadcast interrupts", }; void show_ipi_stats(struct seq_file *p, int prec) @@ -217,6 +226,13 @@ void arch_send_call_function_single_ipi(int cpu) send_ipi_single(cpu, IPI_CALL_FUNC); } +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +void tick_broadcast(const struct cpumask *mask) +{ + send_ipi_mask(mask, IPI_TIMER); +} +#endif + void smp_send_stop(void) { unsigned long timeout; diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 5e276c25646f..9a408e2942ac 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -32,6 +32,7 @@ #include <asm/sections.h> #include <asm/sbi.h> #include <asm/smp.h> +#include <asm/alternative.h> #include "head.h" @@ -40,6 +41,9 @@ static DECLARE_COMPLETION(cpu_running); void __init smp_prepare_boot_cpu(void) { init_cpu_topology(); +#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE + apply_boot_alternatives(); +#endif } void __init smp_prepare_cpus(unsigned int max_cpus) diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c index f1ead9df96ca..a63c667c27b3 100644 --- a/arch/riscv/kernel/syscall_table.c +++ b/arch/riscv/kernel/syscall_table.c @@ -13,7 +13,7 @@ #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), -void *sys_call_table[__NR_syscalls] = { +void * const sys_call_table[__NR_syscalls] = { [0 ... 
__NR_syscalls - 1] = sys_ni_syscall, #include <asm/unistd.h> }; diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c index 1b432264f7ef..8217b0f67c6c 100644 --- a/arch/riscv/kernel/time.c +++ b/arch/riscv/kernel/time.c @@ -11,7 +11,7 @@ #include <asm/processor.h> #include <asm/timex.h> -unsigned long riscv_timebase; +unsigned long riscv_timebase __ro_after_init; EXPORT_SYMBOL_GPL(riscv_timebase); void __init time_init(void) diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 1357abf79570..07fdded10c21 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -197,6 +197,6 @@ int is_valid_bugaddr(unsigned long pc) #endif /* CONFIG_GENERIC_BUG */ /* stvec & scratch is already set from head.S */ -void trap_init(void) +void __init trap_init(void) { } diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c index 3f1d35e7c98a..25a3b8849599 100644 --- a/arch/riscv/kernel/vdso.c +++ b/arch/riscv/kernel/vdso.c @@ -20,8 +20,8 @@ extern char vdso_start[], vdso_end[]; -static unsigned int vdso_pages; -static struct page **vdso_pagelist; +static unsigned int vdso_pages __ro_after_init; +static struct page **vdso_pagelist __ro_after_init; /* * The vDSO data page. diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 71a315e73cbe..24d936c147cd 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -23,7 +23,7 @@ ifneq ($(c-gettimeofday-y),) endif # Build rules -targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o +targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) obj-y += vdso.o vdso-syms.o @@ -41,11 +41,10 @@ KASAN_SANITIZE := n $(obj)/vdso.o: $(obj)/vdso.so # link rule for the .so file, .lds has to be first -SYSCFLAGS_vdso.so.dbg = $(c_flags) -$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE +$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE $(call if_changed,vdsold) -SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ - -Wl,--build-id=sha1 -Wl,--hash-style=both +LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \ + --build-id=sha1 --hash-style=both --eh-frame-hdr # We also create a special relocatable object that should mirror the symbol # table and layout of the linked DSO. With ld --just-symbols we can then @@ -60,13 +59,10 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE # actual build commands # The DSO images are built using a special linker script -# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions. # Make sure only to export the intended __vdso_xxx symbol offsets. 
quiet_cmd_vdsold = VDSOLD $@ - cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \ - -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \ - $(CROSS_COMPILE)objcopy \ - $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ + cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \ + $(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ rm $@.tmp # Extracts symbol offsets from the VDSO, converting them into an assembly file diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S new file mode 100644 index 000000000000..4b29b9917f99 --- /dev/null +++ b/arch/riscv/kernel/vmlinux-xip.lds.S @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + * Copyright (C) 2017 SiFive + * Copyright (C) 2020 Vitaly Wool, Konsulko AB + */ + +#include <asm/pgtable.h> +#define LOAD_OFFSET KERNEL_LINK_ADDR +/* No __ro_after_init data in the .rodata section - which will always be ro */ +#define RO_AFTER_INIT_DATA + +#include <asm/vmlinux.lds.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/cache.h> +#include <asm/thread_info.h> + +OUTPUT_ARCH(riscv) +ENTRY(_start) + +jiffies = jiffies_64; + +SECTIONS +{ + /* Beginning of code and text segment */ + . = LOAD_OFFSET; + _xiprom = .; + _start = .; + HEAD_TEXT_SECTION + INIT_TEXT_SECTION(PAGE_SIZE) + /* we have to discard exit text and such at runtime, not link time */ + .exit.text : + { + EXIT_TEXT + } + + .text : { + _text = .; + _stext = .; + TEXT_TEXT + SCHED_TEXT + CPUIDLE_TEXT + LOCK_TEXT + KPROBES_TEXT + ENTRY_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + *(.fixup) + _etext = .; + } + RO_DATA(L1_CACHE_BYTES) + .srodata : { + *(.srodata*) + } + .init.rodata : { + INIT_SETUP(16) + INIT_CALLS + CON_INITCALL + INIT_RAM_FS + } + _exiprom = .; /* End of XIP ROM area */ + + +/* + * From this point, stuff is considered writable and will be copied to RAM + */ + __data_loc = ALIGN(16); /* location in file */ + . = LOAD_OFFSET + XIP_OFFSET; /* location in memory */ + + _sdata = .; /* Start of data section */ + _data = .; + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + _edata = .; + __start_ro_after_init = .; + .data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) { + *(.data..ro_after_init) + } + __end_ro_after_init = .; + + . = ALIGN(PAGE_SIZE); + __init_begin = .; + .init.data : { + INIT_DATA + } + .exit.data : { + EXIT_DATA + } + . = ALIGN(8); + __soc_early_init_table : { + __soc_early_init_table_start = .; + KEEP(*(__soc_early_init_table)) + __soc_early_init_table_end = .; + } + __soc_builtin_dtb_table : { + __soc_builtin_dtb_table_start = .; + KEEP(*(__soc_builtin_dtb_table)) + __soc_builtin_dtb_table_end = .; + } + PERCPU_SECTION(L1_CACHE_BYTES) + + . = ALIGN(PAGE_SIZE); + __init_end = .; + + .sdata : { + __global_pointer$ = . + 0x800; + *(.sdata*) + *(.sbss*) + } + + BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) + EXCEPTION_TABLE(0x10) + + .rel.dyn : AT(ADDR(.rel.dyn) - LOAD_OFFSET) { + *(.rel.dyn*) + } + + /* + * End of copied data. We need a dummy section to get its LMA. + * Also located before final ALIGN() as trailing padding is not stored + * in the resulting binary file and useless to copy. + */ + .data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { } + _edata_loc = LOADADDR(.data.endmark); + + . 
= ALIGN(PAGE_SIZE); + _end = .; + + STABS_DEBUG + DWARF_DEBUG + + DISCARDS +} diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index de03cb22d0e9..891742ff75a7 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S @@ -4,7 +4,13 @@ * Copyright (C) 2017 SiFive */ -#define LOAD_OFFSET PAGE_OFFSET +#ifdef CONFIG_XIP_KERNEL +#include "vmlinux-xip.lds.S" +#else + +#include <asm/pgtable.h> +#define LOAD_OFFSET KERNEL_LINK_ADDR + #include <asm/vmlinux.lds.h> #include <asm/page.h> #include <asm/cache.h> @@ -90,6 +96,13 @@ SECTIONS } __init_data_end = .; + + . = ALIGN(8); + .alternative : { + __alt_start = .; + *(.alternative) + __alt_end = .; + } __init_end = .; /* Start of data section */ @@ -132,3 +145,4 @@ SECTIONS DISCARDS } +#endif /* CONFIG_XIP_KERNEL */ diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index c5dbd55cbf7c..096463cc6fff 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -231,6 +231,19 @@ asmlinkage void do_page_fault(struct pt_regs *regs) return; } +#ifdef CONFIG_64BIT + /* + * Modules in 64bit kernels lie in their own virtual region which is not + * in the vmalloc region, but dealing with page faults in this region + * or the vmalloc region amounts to doing the same thing: checking that + * the mapping exists in init_mm.pgd and updating user page table, so + * just use vmalloc_fault. + */ + if (unlikely(addr >= MODULES_VADDR && addr < MODULES_END)) { + vmalloc_fault(regs, code, addr); + return; + } +#endif /* Enable interrupts if they were enabled in the parent context. */ if (likely(regs->status & SR_PIE)) local_irq_enable(); diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 92e39cfa5227..dfb5e4f7a670 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -2,6 +2,8 @@ /* * Copyright (C) 2012 Regents of the University of California * Copyright (C) 2019 Western Digital Corporation or its affiliates. 
+ * Copyright (C) 2020 FORTH-ICS/CARV + * Nick Kossifidis <mick@ics.forth.gr> */ #include <linux/init.h> @@ -11,9 +13,11 @@ #include <linux/swap.h> #include <linux/sizes.h> #include <linux/of_fdt.h> +#include <linux/of_reserved_mem.h> #include <linux/libfdt.h> #include <linux/set_memory.h> #include <linux/dma-map-ops.h> +#include <linux/crash_dump.h> #include <asm/fixmap.h> #include <asm/tlbflush.h> @@ -25,14 +29,20 @@ #include "../kernel/head.h" +unsigned long kernel_virt_addr = KERNEL_LINK_ADDR; +EXPORT_SYMBOL(kernel_virt_addr); +#ifdef CONFIG_XIP_KERNEL +#define kernel_virt_addr (*((unsigned long *)XIP_FIXUP(&kernel_virt_addr))) +#endif + unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); extern char _start[]; #define DTB_EARLY_BASE_VA PGDIR_SIZE -void *dtb_early_va __initdata; -uintptr_t dtb_early_pa __initdata; +void *_dtb_early_va __initdata; +uintptr_t _dtb_early_pa __initdata; struct pt_alloc_ops { pte_t *(*get_pte_virt)(phys_addr_t pa); @@ -57,7 +67,7 @@ static void __init zone_sizes_init(void) free_area_init(max_zone_pfns); } -static void setup_zero_page(void) +static void __init setup_zero_page(void) { memset((void *)empty_zero_page, 0, PAGE_SIZE); } @@ -75,7 +85,7 @@ static inline void print_mlm(char *name, unsigned long b, unsigned long t) (((t) - (b)) >> 20)); } -static void print_vm_layout(void) +static void __init print_vm_layout(void) { pr_notice("Virtual kernel memory layout:\n"); print_mlk("fixmap", (unsigned long)FIXADDR_START, @@ -88,6 +98,10 @@ static void print_vm_layout(void) (unsigned long)VMALLOC_END); print_mlm("lowmem", (unsigned long)PAGE_OFFSET, (unsigned long)high_memory); +#ifdef CONFIG_64BIT + print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR, + (unsigned long)ADDRESS_SPACE_END); +#endif } #else static void print_vm_layout(void) { } @@ -112,11 +126,20 @@ void __init setup_bootmem(void) phys_addr_t dram_end = memblock_end_of_DRAM(); phys_addr_t max_mapped_addr = __pa(~(ulong)0); +#ifdef CONFIG_XIP_KERNEL + vmlinux_start = __pa_symbol(&_sdata); +#endif + /* The maximal physical memory size is -PAGE_OFFSET. */ memblock_enforce_memory_limit(-PAGE_OFFSET); - /* Reserve from the start of the kernel to the end of the kernel */ - memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); + /* + * Reserve from the start of the kernel to the end of the kernel + * and make sure we align the reservation on PMD_SIZE since we will + * map the kernel in the linear mapping as read-only: we do not want + * any allocation to happen between _end and the next pmd aligned page. 
+ */ + memblock_reserve(vmlinux_start, (vmlinux_end - vmlinux_start + PMD_SIZE - 1) & PMD_MASK); /* * memblock allocator is not aware of the fact that last 4K bytes of @@ -127,8 +150,9 @@ void __init setup_bootmem(void) if (max_mapped_addr == (dram_end - 1)) memblock_set_current_limit(max_mapped_addr - 4096); - max_pfn = PFN_DOWN(dram_end); - max_low_pfn = max_pfn; + min_low_pfn = PFN_UP(memblock_start_of_DRAM()); + max_low_pfn = max_pfn = PFN_DOWN(dram_end); + dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); @@ -147,12 +171,42 @@ void __init setup_bootmem(void) memblock_allow_resize(); } +#ifdef CONFIG_XIP_KERNEL + +extern char _xiprom[], _exiprom[]; +extern char _sdata[], _edata[]; + +#endif /* CONFIG_XIP_KERNEL */ + #ifdef CONFIG_MMU -static struct pt_alloc_ops pt_ops; +static struct pt_alloc_ops _pt_ops __ro_after_init; + +#ifdef CONFIG_XIP_KERNEL +#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops)) +#else +#define pt_ops _pt_ops +#endif -unsigned long va_pa_offset; +/* Offset between linear mapping virtual address and kernel load address */ +unsigned long va_pa_offset __ro_after_init; EXPORT_SYMBOL(va_pa_offset); -unsigned long pfn_base; +#ifdef CONFIG_XIP_KERNEL +#define va_pa_offset (*((unsigned long *)XIP_FIXUP(&va_pa_offset))) +#endif +/* Offset between kernel mapping virtual address and kernel load address */ +#ifdef CONFIG_64BIT +unsigned long va_kernel_pa_offset; +EXPORT_SYMBOL(va_kernel_pa_offset); +#endif +#ifdef CONFIG_XIP_KERNEL +#define va_kernel_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_pa_offset))) +#endif +unsigned long va_kernel_xip_pa_offset; +EXPORT_SYMBOL(va_kernel_xip_pa_offset); +#ifdef CONFIG_XIP_KERNEL +#define va_kernel_xip_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_xip_pa_offset))) +#endif +unsigned long pfn_base __ro_after_init; EXPORT_SYMBOL(pfn_base); pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; @@ -161,6 +215,12 @@ pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); +#ifdef CONFIG_XIP_KERNEL +#define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir)) +#define fixmap_pte ((pte_t *)XIP_FIXUP(fixmap_pte)) +#define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir)) +#endif /* CONFIG_XIP_KERNEL */ + void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) { unsigned long addr = __fix_to_virt(idx); @@ -212,8 +272,8 @@ static phys_addr_t alloc_pte_late(uintptr_t va) unsigned long vaddr; vaddr = __get_free_page(GFP_KERNEL); - if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr))) - BUG(); + BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr))); + return __pa(vaddr); } @@ -236,6 +296,12 @@ pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss; pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); +#ifdef CONFIG_XIP_KERNEL +#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd)) +#define fixmap_pmd ((pmd_t *)XIP_FIXUP(fixmap_pmd)) +#define early_pmd ((pmd_t *)XIP_FIXUP(early_pmd)) +#endif /* CONFIG_XIP_KERNEL */ + static pmd_t *__init get_pmd_virt_early(phys_addr_t pa) { /* Before MMU is enabled */ @@ -255,7 +321,7 @@ static pmd_t *get_pmd_virt_late(phys_addr_t pa) static phys_addr_t __init alloc_pmd_early(uintptr_t va) { - BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT); + BUG_ON((va - kernel_virt_addr) >> PGDIR_SHIFT); return (uintptr_t)early_pmd; } @@ -352,6 +418,19 @@ static uintptr_t 
__init best_map_size(phys_addr_t base, phys_addr_t size) return PMD_SIZE; } +#ifdef CONFIG_XIP_KERNEL +/* called from head.S with MMU off */ +asmlinkage void __init __copy_data(void) +{ + void *from = (void *)(&_sdata); + void *end = (void *)(&_end); + void *to = (void *)CONFIG_PHYS_RAM_BASE; + size_t sz = (size_t)(end - from + 1); + + memcpy(to, from, sz); +} +#endif + /* * setup_vm() is called from head.S with MMU-off. * @@ -370,17 +449,74 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size) #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing." #endif +uintptr_t load_pa, load_sz; +#ifdef CONFIG_XIP_KERNEL +#define load_pa (*((uintptr_t *)XIP_FIXUP(&load_pa))) +#define load_sz (*((uintptr_t *)XIP_FIXUP(&load_sz))) +#endif + +#ifdef CONFIG_XIP_KERNEL +uintptr_t xiprom, xiprom_sz; +#define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz))) +#define xiprom (*((uintptr_t *)XIP_FIXUP(&xiprom))) + +static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size) +{ + uintptr_t va, end_va; + + /* Map the flash resident part */ + end_va = kernel_virt_addr + xiprom_sz; + for (va = kernel_virt_addr; va < end_va; va += map_size) + create_pgd_mapping(pgdir, va, + xiprom + (va - kernel_virt_addr), + map_size, PAGE_KERNEL_EXEC); + + /* Map the data in RAM */ + end_va = kernel_virt_addr + XIP_OFFSET + load_sz; + for (va = kernel_virt_addr + XIP_OFFSET; va < end_va; va += map_size) + create_pgd_mapping(pgdir, va, + load_pa + (va - (kernel_virt_addr + XIP_OFFSET)), + map_size, PAGE_KERNEL); +} +#else +static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size) +{ + uintptr_t va, end_va; + + end_va = kernel_virt_addr + load_sz; + for (va = kernel_virt_addr; va < end_va; va += map_size) + create_pgd_mapping(pgdir, va, + load_pa + (va - kernel_virt_addr), + map_size, PAGE_KERNEL_EXEC); +} +#endif + asmlinkage void __init setup_vm(uintptr_t dtb_pa) { - uintptr_t va, pa, end_va; - uintptr_t load_pa = (uintptr_t)(&_start); - uintptr_t load_sz = (uintptr_t)(&_end) - load_pa; + uintptr_t __maybe_unused pa; uintptr_t map_size; #ifndef __PAGETABLE_PMD_FOLDED pmd_t fix_bmap_spmd, fix_bmap_epmd; #endif +#ifdef CONFIG_XIP_KERNEL + xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR; + xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom); + + load_pa = (uintptr_t)CONFIG_PHYS_RAM_BASE; + load_sz = (uintptr_t)(&_end) - (uintptr_t)(&_sdata); + + va_kernel_xip_pa_offset = kernel_virt_addr - xiprom; +#else + load_pa = (uintptr_t)(&_start); + load_sz = (uintptr_t)(&_end) - load_pa; +#endif + va_pa_offset = PAGE_OFFSET - load_pa; +#ifdef CONFIG_64BIT + va_kernel_pa_offset = kernel_virt_addr - load_pa; +#endif + pfn_base = PFN_DOWN(load_pa); /* @@ -408,26 +544,27 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) create_pmd_mapping(fixmap_pmd, FIXADDR_START, (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE); /* Setup trampoline PGD and PMD */ - create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET, + create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr, (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE); - create_pmd_mapping(trampoline_pmd, PAGE_OFFSET, +#ifdef CONFIG_XIP_KERNEL + create_pmd_mapping(trampoline_pmd, kernel_virt_addr, + xiprom, PMD_SIZE, PAGE_KERNEL_EXEC); +#else + create_pmd_mapping(trampoline_pmd, kernel_virt_addr, load_pa, PMD_SIZE, PAGE_KERNEL_EXEC); +#endif #else /* Setup trampoline PGD */ - create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET, + create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr, load_pa, PGDIR_SIZE, 
PAGE_KERNEL_EXEC); #endif /* - * Setup early PGD covering entire kernel which will allows + * Setup early PGD covering entire kernel which will allow * us to reach paging_init(). We map all memory banks later * in setup_vm_final() below. */ - end_va = PAGE_OFFSET + load_sz; - for (va = PAGE_OFFSET; va < end_va; va += map_size) - create_pgd_mapping(early_pg_dir, va, - load_pa + (va - PAGE_OFFSET), - map_size, PAGE_KERNEL_EXEC); + create_kernel_page_table(early_pg_dir, map_size); #ifndef __PAGETABLE_PMD_FOLDED /* Setup early PMD for DTB */ @@ -442,7 +579,16 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL); dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1)); #else /* CONFIG_BUILTIN_DTB */ +#ifdef CONFIG_64BIT + /* + * __va can't be used since it would return a linear mapping address + * whereas dtb_early_va will be used before setup_vm_final installs + * the linear mapping. + */ + dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa)); +#else dtb_early_va = __va(dtb_pa); +#endif /* CONFIG_64BIT */ #endif /* CONFIG_BUILTIN_DTB */ #else #ifndef CONFIG_BUILTIN_DTB @@ -454,7 +600,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL); dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1)); #else /* CONFIG_BUILTIN_DTB */ +#ifdef CONFIG_64BIT + dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa)); +#else dtb_early_va = __va(dtb_pa); +#endif /* CONFIG_64BIT */ #endif /* CONFIG_BUILTIN_DTB */ #endif dtb_early_pa = dtb_pa; @@ -490,6 +640,22 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) #endif } +#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL) +void protect_kernel_linear_mapping_text_rodata(void) +{ + unsigned long text_start = (unsigned long)lm_alias(_start); + unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin); + unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata); + unsigned long data_start = (unsigned long)lm_alias(_data); + + set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT); + set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT); + + set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT); + set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT); +} +#endif + static void __init setup_vm_final(void) { uintptr_t va, map_size; @@ -511,7 +677,7 @@ static void __init setup_vm_final(void) __pa_symbol(fixmap_pgd_next), PGDIR_SIZE, PAGE_TABLE); - /* Map all memory banks */ + /* Map all memory banks in the linear mapping */ for_each_mem_range(i, &start, &end) { if (start >= end) break; @@ -523,10 +689,22 @@ static void __init setup_vm_final(void) for (pa = start; pa < end; pa += map_size) { va = (uintptr_t)__va(pa); create_pgd_mapping(swapper_pg_dir, va, pa, - map_size, PAGE_KERNEL_EXEC); + map_size, +#ifdef CONFIG_64BIT + PAGE_KERNEL +#else + PAGE_KERNEL_EXEC +#endif + ); + } } +#ifdef CONFIG_64BIT + /* Map the kernel */ + create_kernel_page_table(swapper_pg_dir, PMD_SIZE); +#endif + /* Clear fixmap PTE and PMD mappings */ clear_fixmap(FIX_PTE); clear_fixmap(FIX_PMD); @@ -556,7 +734,7 @@ static inline void setup_vm_final(void) #endif /* CONFIG_MMU */ #ifdef CONFIG_STRICT_KERNEL_RWX -void protect_kernel_text_data(void) +void __init protect_kernel_text_data(void) { unsigned long text_start = (unsigned long)_start; unsigned long init_text_start = (unsigned long)__init_text_begin; @@ -584,6 +762,103 @@ void mark_rodata_ro(void) } #endif +#ifdef 
CONFIG_KEXEC_CORE +/* + * reserve_crashkernel() - reserves memory for crash kernel + * + * This function reserves memory area given in "crashkernel=" kernel command + * line parameter. The memory reserved is used by dump capture kernel when + * primary kernel is crashing. + */ +static void __init reserve_crashkernel(void) +{ + unsigned long long crash_base = 0; + unsigned long long crash_size = 0; + unsigned long search_start = memblock_start_of_DRAM(); + unsigned long search_end = memblock_end_of_DRAM(); + + int ret = 0; + + /* + * Don't reserve a region for a crash kernel on a crash kernel + * since it doesn't make much sense and we have limited memory + * resources. + */ +#ifdef CONFIG_CRASH_DUMP + if (is_kdump_kernel()) { + pr_info("crashkernel: ignoring reservation request\n"); + return; + } +#endif + + ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), + &crash_size, &crash_base); + if (ret || !crash_size) + return; + + crash_size = PAGE_ALIGN(crash_size); + + if (crash_base == 0) { + /* + * Current riscv boot protocol requires 2MB alignment for + * RV64 and 4MB alignment for RV32 (hugepage size) + */ + crash_base = memblock_find_in_range(search_start, search_end, + crash_size, PMD_SIZE); + + if (crash_base == 0) { + pr_warn("crashkernel: couldn't allocate %lldKB\n", + crash_size >> 10); + return; + } + } else { + /* User specifies base address explicitly. */ + if (!memblock_is_region_memory(crash_base, crash_size)) { + pr_warn("crashkernel: requested region is not memory\n"); + return; + } + + if (memblock_is_region_reserved(crash_base, crash_size)) { + pr_warn("crashkernel: requested region is reserved\n"); + return; + } + + + if (!IS_ALIGNED(crash_base, PMD_SIZE)) { + pr_warn("crashkernel: requested region is misaligned\n"); + return; + } + } + memblock_reserve(crash_base, crash_size); + + pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n", + crash_base, crash_base + crash_size, crash_size >> 20); + + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; +} +#endif /* CONFIG_KEXEC_CORE */ + +#ifdef CONFIG_CRASH_DUMP +/* + * We keep track of the ELF core header of the crashed + * kernel with a reserved-memory region with compatible + * string "linux,elfcorehdr". Here we register a callback + * to populate elfcorehdr_addr/size when this region is + * present. Note that this region will be marked as + * reserved once we call early_init_fdt_scan_reserved_mem() + * later on. 
+ */ +static int elfcore_hdr_setup(struct reserved_mem *rmem) +{ + elfcorehdr_addr = rmem->base; + elfcorehdr_size = rmem->size; + return 0; +} + +RESERVEDMEM_OF_DECLARE(elfcorehdr, "linux,elfcorehdr", elfcore_hdr_setup); +#endif + void __init paging_init(void) { setup_vm_final(); @@ -592,9 +867,13 @@ void __init paging_init(void) void __init misc_mem_init(void) { + early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT); arch_numa_init(); sparse_init(); zone_sizes_init(); +#ifdef CONFIG_KEXEC_CORE + reserve_crashkernel(); +#endif memblock_dump_all(); } diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index 937d13ce9ab8..9daacae93e33 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -11,18 +11,6 @@ #include <asm/fixmap.h> #include <asm/pgalloc.h> -static __init void *early_alloc(size_t size, int node) -{ - void *ptr = memblock_alloc_try_nid(size, size, - __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node); - - if (!ptr) - panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n", - __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS)); - - return ptr; -} - extern pgd_t early_pg_dir[PTRS_PER_PGD]; asmlinkage void __init kasan_early_init(void) { @@ -60,7 +48,7 @@ asmlinkage void __init kasan_early_init(void) local_flush_tlb_all(); } -static void kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end) +static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end) { phys_addr_t phys_addr; pte_t *ptep, *base_pte; @@ -82,7 +70,7 @@ static void kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long en set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE)); } -static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end) +static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end) { phys_addr_t phys_addr; pmd_t *pmdp, *base_pmd; @@ -117,7 +105,7 @@ static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long en set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE)); } -static void kasan_populate_pgd(unsigned long vaddr, unsigned long end) +static void __init kasan_populate_pgd(unsigned long vaddr, unsigned long end) { phys_addr_t phys_addr; pgd_t *pgdp = pgd_offset_k(vaddr); @@ -155,39 +143,27 @@ static void __init kasan_populate(void *start, void *end) memset(start, KASAN_SHADOW_INIT, end - start); } +static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end) +{ + unsigned long next; + void *p; + pgd_t *pgd_k = pgd_offset_k(vaddr); + + do { + next = pgd_addr_end(vaddr, end); + if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) { + p = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE)); + } + } while (pgd_k++, vaddr = next, vaddr != end); +} + static void __init kasan_shallow_populate(void *start, void *end) { unsigned long vaddr = (unsigned long)start & PAGE_MASK; unsigned long vend = PAGE_ALIGN((unsigned long)end); - unsigned long pfn; - int index; - void *p; - pud_t *pud_dir, *pud_k; - pgd_t *pgd_dir, *pgd_k; - p4d_t *p4d_dir, *p4d_k; - - while (vaddr < vend) { - index = pgd_index(vaddr); - pfn = csr_read(CSR_SATP) & SATP_PPN; - pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index; - pgd_k = init_mm.pgd + index; - pgd_dir = pgd_offset_k(vaddr); - set_pgd(pgd_dir, *pgd_k); - - p4d_dir = p4d_offset(pgd_dir, vaddr); - p4d_k = p4d_offset(pgd_k, vaddr); - - vaddr = (vaddr + PUD_SIZE) & PUD_MASK; - pud_dir 
= pud_offset(p4d_dir, vaddr); - pud_k = pud_offset(p4d_k, vaddr); - - if (pud_present(*pud_dir)) { - p = early_alloc(PAGE_SIZE, NUMA_NO_NODE); - pud_populate(&init_mm, pud_dir, p); - } - vaddr += PAGE_SIZE; - } + kasan_shallow_populate_pgd(vaddr, vend); local_flush_tlb_all(); } @@ -196,6 +172,10 @@ void __init kasan_init(void) phys_addr_t _start, _end; u64 i; + /* + * Populate all kernel virtual address space with kasan_early_shadow_page + * except for the linear mapping and the modules/kernel/BPF mapping. + */ kasan_populate_early_shadow((void *)KASAN_SHADOW_START, (void *)kasan_mem_to_shadow((void *) VMEMMAP_END)); @@ -208,6 +188,7 @@ void __init kasan_init(void) (void *)kasan_mem_to_shadow((void *)VMALLOC_START), (void *)kasan_mem_to_shadow((void *)VMALLOC_END)); + /* Populate the linear mapping */ for_each_mem_range(i, &_start, &_end) { void *start = (void *)__va(_start); void *end = (void *)__va(_end); @@ -218,6 +199,10 @@ void __init kasan_init(void) kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end)); } + /* Populate kernel, BPF, modules mapping */ + kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR), + kasan_mem_to_shadow((const void *)BPF_JIT_REGION_END)); + for (i = 0; i < PTRS_PER_PTE; i++) set_pte(&kasan_early_shadow_pte[i], mk_pte(virt_to_page(kasan_early_shadow_page), diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c index e8e4dcd39fed..35703d5ef5fd 100644 --- a/arch/riscv/mm/physaddr.c +++ b/arch/riscv/mm/physaddr.c @@ -23,7 +23,7 @@ EXPORT_SYMBOL(__virt_to_phys); phys_addr_t __phys_addr_symbol(unsigned long x) { - unsigned long kernel_start = (unsigned long)PAGE_OFFSET; + unsigned long kernel_start = (unsigned long)kernel_virt_addr; unsigned long kernel_end = (unsigned long)_end; /* diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c index ace74dec7492..0536ac84b730 100644 --- a/arch/riscv/mm/ptdump.c +++ b/arch/riscv/mm/ptdump.c @@ -58,29 +58,56 @@ struct ptd_mm_info { unsigned long end; }; +enum address_markers_idx { +#ifdef CONFIG_KASAN + KASAN_SHADOW_START_NR, + KASAN_SHADOW_END_NR, +#endif + FIXMAP_START_NR, + FIXMAP_END_NR, + PCI_IO_START_NR, + PCI_IO_END_NR, +#ifdef CONFIG_SPARSEMEM_VMEMMAP + VMEMMAP_START_NR, + VMEMMAP_END_NR, +#endif + VMALLOC_START_NR, + VMALLOC_END_NR, + PAGE_OFFSET_NR, +#ifdef CONFIG_64BIT + MODULES_MAPPING_NR, + KERNEL_MAPPING_NR, +#endif + END_OF_SPACE_NR +}; + static struct addr_marker address_markers[] = { #ifdef CONFIG_KASAN - {KASAN_SHADOW_START, "Kasan shadow start"}, - {KASAN_SHADOW_END, "Kasan shadow end"}, + {0, "Kasan shadow start"}, + {0, "Kasan shadow end"}, #endif - {FIXADDR_START, "Fixmap start"}, - {FIXADDR_TOP, "Fixmap end"}, - {PCI_IO_START, "PCI I/O start"}, - {PCI_IO_END, "PCI I/O end"}, + {0, "Fixmap start"}, + {0, "Fixmap end"}, + {0, "PCI I/O start"}, + {0, "PCI I/O end"}, #ifdef CONFIG_SPARSEMEM_VMEMMAP - {VMEMMAP_START, "vmemmap start"}, - {VMEMMAP_END, "vmemmap end"}, + {0, "vmemmap start"}, + {0, "vmemmap end"}, +#endif + {0, "vmalloc() area"}, + {0, "vmalloc() end"}, + {0, "Linear mapping"}, +#ifdef CONFIG_64BIT + {0, "Modules mapping"}, + {0, "Kernel mapping (kernel, BPF)"}, #endif - {VMALLOC_START, "vmalloc() area"}, - {VMALLOC_END, "vmalloc() end"}, - {PAGE_OFFSET, "Linear mapping"}, {-1, NULL}, }; static struct ptd_mm_info kernel_ptd_info = { .mm = &init_mm, .markers = address_markers, - .base_addr = KERN_VIRT_START, + .base_addr = 0, .end = ULONG_MAX, }; @@ -331,10 +358,32 @@ static int ptdump_show(struct seq_file *m, void *v) DEFINE_SHOW_ATTRIBUTE(ptdump); 
-static int ptdump_init(void) +static int __init ptdump_init(void) { unsigned int i, j; +#ifdef CONFIG_KASAN + address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START; + address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END; +#endif + address_markers[FIXMAP_START_NR].start_address = FIXADDR_START; + address_markers[FIXMAP_END_NR].start_address = FIXADDR_TOP; + address_markers[PCI_IO_START_NR].start_address = PCI_IO_START; + address_markers[PCI_IO_END_NR].start_address = PCI_IO_END; +#ifdef CONFIG_SPARSEMEM_VMEMMAP + address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START; + address_markers[VMEMMAP_END_NR].start_address = VMEMMAP_END; +#endif + address_markers[VMALLOC_START_NR].start_address = VMALLOC_START; + address_markers[VMALLOC_END_NR].start_address = VMALLOC_END; + address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET; +#ifdef CONFIG_64BIT + address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR; + address_markers[KERNEL_MAPPING_NR].start_address = kernel_virt_addr; +#endif + + kernel_ptd_info.base_addr = KERN_VIRT_START; + for (i = 0; i < ARRAY_SIZE(pg_level); i++) for (j = 0; j < ARRAY_SIZE(pte_bits); j++) pg_level[i].mask |= pte_bits[j].mask; diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index b44ff52f84a6..87e3bf5b9086 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -1148,16 +1148,3 @@ void bpf_jit_build_epilogue(struct rv_jit_context *ctx) { __build_epilogue(false, ctx); } - -void *bpf_jit_alloc_exec(unsigned long size) -{ - return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START, - BPF_JIT_REGION_END, GFP_KERNEL, - PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, - __builtin_return_address(0)); -} - -void bpf_jit_free_exec(void *addr) -{ - return vfree(addr); -} diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c index 3630d447352c..fed86f42dfbe 100644 --- a/arch/riscv/net/bpf_jit_core.c +++ b/arch/riscv/net/bpf_jit_core.c @@ -152,6 +152,7 @@ skip_init_ctx: bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns); if (!prog->is_func || extra_pass) { + bpf_jit_binary_lock_ro(jit_data->header); out_offset: kfree(ctx->offset); kfree(jit_data); @@ -164,3 +165,16 @@ out: tmp : orig_prog); return prog; } + +void *bpf_jit_alloc_exec(unsigned long size) +{ + return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START, + BPF_JIT_REGION_END, GFP_KERNEL, + PAGE_KERNEL, 0, NUMA_NO_NODE, + __builtin_return_address(0)); +} + +void bpf_jit_free_exec(void *addr) +{ + return vfree(addr); +} diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c1ff874e6c2e..b4c7c34069f8 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -60,6 +60,9 @@ config S390 imply IMA_SECURE_AND_OR_TRUSTED_BOOT select ARCH_32BIT_USTAT_F_TINODE select ARCH_BINFMT_ELF_STATE + select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM + select ARCH_ENABLE_MEMORY_HOTREMOVE + select ARCH_ENABLE_SPLIT_PMD_PTLOCK select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEBUG_WX select ARCH_HAS_DEVMEM_IS_ALLOWED @@ -137,6 +140,7 @@ config S390 select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_KASAN select HAVE_ARCH_KASAN_VMALLOC + select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_SOFT_DIRTY select HAVE_ARCH_TRACEHOOK @@ -626,15 +630,6 @@ config ARCH_SPARSEMEM_ENABLE config ARCH_SPARSEMEM_DEFAULT def_bool y -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y if SPARSEMEM - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - -config 
ARCH_ENABLE_SPLIT_PMD_PTLOCK - def_bool y - config MAX_PHYSMEM_BITS int "Maximum size of supported physical memory in bits (42-53)" range 42 53 diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 6422618a4f75..86afcc6b56bf 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -387,6 +387,7 @@ CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m CONFIG_PCI=y +CONFIG_PCI_IOV=y # CONFIG_PCIEASPM is not set CONFIG_PCI_DEBUG=y CONFIG_HOTPLUG_PCI=y @@ -548,7 +549,7 @@ CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_LEGACY_PTY_COUNT=0 -CONFIG_VIRTIO_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=m CONFIG_HW_RANDOM_VIRTIO=m CONFIG_RAW_DRIVER=m CONFIG_HANGCHECK_TIMER=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 371a529546aa..71b49ea5b058 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -377,6 +377,7 @@ CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m CONFIG_PCI=y +CONFIG_PCI_IOV=y # CONFIG_PCIEASPM is not set CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_S390=y @@ -540,7 +541,7 @@ CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_LEGACY_PTY_COUNT=0 -CONFIG_VIRTIO_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=m CONFIG_HW_RANDOM_VIRTIO=m CONFIG_RAW_DRIVER=m CONFIG_HANGCHECK_TIMER=m diff --git a/arch/s390/include/asm/cpu_mcf.h b/arch/s390/include/asm/cpu_mcf.h index 649b9fc60685..3e4cbcb7c4cc 100644 --- a/arch/s390/include/asm/cpu_mcf.h +++ b/arch/s390/include/asm/cpu_mcf.h @@ -123,4 +123,6 @@ static inline int stccm_avail(void) return test_facility(142); } +size_t cpum_cf_ctrset_size(enum cpumf_ctr_set ctrset, + struct cpumf_ctr_info *info); #endif /* _ASM_S390_CPU_MCF_H */ diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h index 9cceb26ed63f..baa8005090c3 100644 --- a/arch/s390/include/asm/entry-common.h +++ b/arch/s390/include/asm/entry-common.h @@ -4,9 +4,11 @@ #include <linux/sched.h> #include <linux/audit.h> +#include <linux/randomize_kstack.h> #include <linux/tracehook.h> #include <linux/processor.h> #include <linux/uaccess.h> +#include <asm/timex.h> #include <asm/fpu/api.h> #define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP) @@ -48,6 +50,14 @@ static __always_inline void arch_exit_to_user_mode(void) #define arch_exit_to_user_mode arch_exit_to_user_mode +static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, + unsigned long ti_work) +{ + choose_random_kstack_offset(get_tod_clock_fast() & 0xff); +} + +#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare + static inline bool on_thread_stack(void) { return !(((unsigned long)(current->stack) ^ current_stack_pointer()) & ~(THREAD_SIZE - 1)); diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 28664ee0abc1..e3882b012bfa 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -20,11 +20,6 @@ void *xlate_dev_mem_ptr(phys_addr_t phys); #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - #define IO_SPACE_LIMIT 0 void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot); diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 35c2af9371a9..10b67f8aab99 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ 
-204,7 +204,7 @@ extern unsigned int s390_pci_no_rid; struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state); int zpci_enable_device(struct zpci_dev *); int zpci_disable_device(struct zpci_dev *); -int zpci_configure_device(struct zpci_dev *zdev, u32 fh); +int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh); int zpci_deconfigure_device(struct zpci_dev *zdev); int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64); diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index b3beef64d3d4..31a605bcbc6e 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -230,9 +230,7 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type) /* No support for kernel space counters only */ } else if (!attr->exclude_kernel && attr->exclude_user) { return -EOPNOTSUPP; - - /* Count user and kernel space */ - } else { + } else { /* Count user and kernel space */ if (ev >= ARRAY_SIZE(cpumf_generic_events_basic)) return -EOPNOTSUPP; ev = cpumf_generic_events_basic[ev]; @@ -402,12 +400,12 @@ static void cpumf_pmu_stop(struct perf_event *event, int flags) */ if (!atomic_dec_return(&cpuhw->ctr_set[hwc->config_base])) ctr_set_stop(&cpuhw->state, hwc->config_base); - event->hw.state |= PERF_HES_STOPPED; + hwc->state |= PERF_HES_STOPPED; } if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { hw_perf_event_update(event); - event->hw.state |= PERF_HES_UPTODATE; + hwc->state |= PERF_HES_UPTODATE; } } @@ -430,8 +428,6 @@ static int cpumf_pmu_add(struct perf_event *event, int flags) if (flags & PERF_EF_START) cpumf_pmu_start(event, PERF_EF_RELOAD); - perf_event_update_userpage(event); - return 0; } @@ -451,8 +447,6 @@ static void cpumf_pmu_del(struct perf_event *event, int flags) */ if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base])) ctr_set_disable(&cpuhw->state, event->hw.config_base); - - perf_event_update_userpage(event); } /* diff --git a/arch/s390/kernel/perf_cpum_cf_common.c b/arch/s390/kernel/perf_cpum_cf_common.c index 3bced89caffb..6d53215c8484 100644 --- a/arch/s390/kernel/perf_cpum_cf_common.c +++ b/arch/s390/kernel/perf_cpum_cf_common.c @@ -170,6 +170,52 @@ static int cpum_cf_offline_cpu(unsigned int cpu) return cpum_cf_setup(cpu, PMC_RELEASE); } +/* Return the maximum possible counter set size (in number of 8 byte counters) + * depending on type and model number. 
+ */ +size_t cpum_cf_ctrset_size(enum cpumf_ctr_set ctrset, + struct cpumf_ctr_info *info) +{ + size_t ctrset_size = 0; + + switch (ctrset) { + case CPUMF_CTR_SET_BASIC: + if (info->cfvn >= 1) + ctrset_size = 6; + break; + case CPUMF_CTR_SET_USER: + if (info->cfvn == 1) + ctrset_size = 6; + else if (info->cfvn >= 3) + ctrset_size = 2; + break; + case CPUMF_CTR_SET_CRYPTO: + if (info->csvn >= 1 && info->csvn <= 5) + ctrset_size = 16; + else if (info->csvn == 6) + ctrset_size = 20; + break; + case CPUMF_CTR_SET_EXT: + if (info->csvn == 1) + ctrset_size = 32; + else if (info->csvn == 2) + ctrset_size = 48; + else if (info->csvn >= 3 && info->csvn <= 5) + ctrset_size = 128; + else if (info->csvn == 6) + ctrset_size = 160; + break; + case CPUMF_CTR_SET_MT_DIAG: + if (info->csvn > 3) + ctrset_size = 48; + break; + case CPUMF_CTR_SET_MAX: + break; + } + + return ctrset_size; +} + static int __init cpum_cf_init(void) { int rc; diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c index 2e3e7edbe3a0..08c985c1097c 100644 --- a/arch/s390/kernel/perf_cpum_cf_diag.c +++ b/arch/s390/kernel/perf_cpum_cf_diag.c @@ -316,52 +316,6 @@ static void cf_diag_read(struct perf_event *event) debug_sprintf_event(cf_diag_dbg, 5, "%s event %p\n", __func__, event); } -/* Return the maximum possible counter set size (in number of 8 byte counters) - * depending on type and model number. - */ -static size_t cf_diag_ctrset_size(enum cpumf_ctr_set ctrset, - struct cpumf_ctr_info *info) -{ - size_t ctrset_size = 0; - - switch (ctrset) { - case CPUMF_CTR_SET_BASIC: - if (info->cfvn >= 1) - ctrset_size = 6; - break; - case CPUMF_CTR_SET_USER: - if (info->cfvn == 1) - ctrset_size = 6; - else if (info->cfvn >= 3) - ctrset_size = 2; - break; - case CPUMF_CTR_SET_CRYPTO: - if (info->csvn >= 1 && info->csvn <= 5) - ctrset_size = 16; - else if (info->csvn == 6) - ctrset_size = 20; - break; - case CPUMF_CTR_SET_EXT: - if (info->csvn == 1) - ctrset_size = 32; - else if (info->csvn == 2) - ctrset_size = 48; - else if (info->csvn >= 3 && info->csvn <= 5) - ctrset_size = 128; - else if (info->csvn == 6) - ctrset_size = 160; - break; - case CPUMF_CTR_SET_MT_DIAG: - if (info->csvn > 3) - ctrset_size = 48; - break; - case CPUMF_CTR_SET_MAX: - break; - } - - return ctrset_size; -} - /* Calculate memory needed to store all counter sets together with header and * trailer data. This is independend of the counter set authorization which * can vary depending on the configuration. 
@@ -372,7 +326,7 @@ static size_t cf_diag_ctrset_maxsize(struct cpumf_ctr_info *info) enum cpumf_ctr_set i; for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) { - size_t size = cf_diag_ctrset_size(i, info); + size_t size = cpum_cf_ctrset_size(i, info); if (size) max_size += size * sizeof(u64) + @@ -405,7 +359,7 @@ static size_t cf_diag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset, ctrdata->def = CF_DIAG_CTRSET_DEF; ctrdata->set = ctrset; ctrdata->res1 = 0; - ctrset_size = cf_diag_ctrset_size(ctrset, &cpuhw->info); + ctrset_size = cpum_cf_ctrset_size(ctrset, &cpuhw->info); if (ctrset_size) { /* Save data */ need = ctrset_size * sizeof(u64) + sizeof(*ctrdata); @@ -845,7 +799,7 @@ static void cf_diag_cpu_read(void *parm) if (!(p->sets & cpumf_ctr_ctl[set])) continue; /* Counter set not in list */ - set_size = cf_diag_ctrset_size(set, &cpuhw->info); + set_size = cpum_cf_ctrset_size(set, &cpuhw->info); space = sizeof(csd->data) - csd->used; space = cf_diag_cpuset_read(sp, set, set_size, space); if (space) { @@ -975,7 +929,7 @@ static size_t cf_diag_needspace(unsigned int sets) for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) { if (!(sets & cpumf_ctr_ctl[i])) continue; - bytes += cf_diag_ctrset_size(i, &cpuhw->info) * sizeof(u64) + + bytes += cpum_cf_ctrset_size(i, &cpuhw->info) * sizeof(u64) + sizeof(((struct s390_ctrset_setdata *)0)->set) + sizeof(((struct s390_ctrset_setdata *)0)->no_cnts); } diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 72134f9f6ff5..5aab59ad5688 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -937,9 +937,9 @@ static int __init setup_hwcaps(void) if (MACHINE_HAS_VX) { elf_hwcap |= HWCAP_S390_VXRS; if (test_facility(134)) - elf_hwcap |= HWCAP_S390_VXRS_EXT; - if (test_facility(135)) elf_hwcap |= HWCAP_S390_VXRS_BCD; + if (test_facility(135)) + elf_hwcap |= HWCAP_S390_VXRS_EXT; if (test_facility(148)) elf_hwcap |= HWCAP_S390_VXRS_EXT2; if (test_facility(152)) diff --git a/arch/s390/kernel/syscall.c b/arch/s390/kernel/syscall.c index bc8e650e377d..4e5cc7d2364e 100644 --- a/arch/s390/kernel/syscall.c +++ b/arch/s390/kernel/syscall.c @@ -142,6 +142,7 @@ void do_syscall(struct pt_regs *regs) void noinstr __do_syscall(struct pt_regs *regs, int per_trap) { + add_random_kstack_offset(); enter_from_user_mode(regs); memcpy(®s->gprs[8], S390_lowcore.save_area_sync, 8 * sizeof(unsigned long)); diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 63021d484626..8dd23c703718 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -17,6 +17,7 @@ #include "asm/ptrace.h" #include <linux/kprobes.h> #include <linux/kdebug.h> +#include <linux/randomize_kstack.h> #include <linux/extable.h> #include <linux/ptrace.h> #include <linux/sched.h> @@ -301,6 +302,7 @@ void noinstr __do_pgm_check(struct pt_regs *regs) unsigned int trapnr, syscall_redirect = 0; irqentry_state_t state; + add_random_kstack_offset(); regs->int_code = *(u32 *)&S390_lowcore.pgm_ilc; regs->int_parm_long = S390_lowcore.trans_exc_code; diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 3b5a4d25ca9b..da36d13ffc16 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -189,7 +189,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, return pte; } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgdp; diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 
c01b6dbac7cf..b0993e05affe 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -738,17 +738,19 @@ error: } /** - * zpci_configure_device() - Configure a zpci_dev + * zpci_scan_configured_device() - Scan a freshly configured zpci_dev * @zdev: The zpci_dev to be configured * @fh: The general function handle supplied by the platform * * Given a device in the configuration state Configured, enables, scans and - * adds it to the common code PCI subsystem. If any failure occurs, the - * zpci_dev is left disabled. + * adds it to the common code PCI subsystem if possible. If the PCI device is + * parked because we can not yet create a PCI bus because we have not seen + * function 0, it is ignored but will be scanned once function 0 appears. + * If any failure occurs, the zpci_dev is left disabled. * * Return: 0 on success, or an error code otherwise */ -int zpci_configure_device(struct zpci_dev *zdev, u32 fh) +int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh) { int rc; diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 1178b48a66df..cd447b96b4b1 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -76,8 +76,6 @@ void zpci_event_error(void *data) static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh) { - enum zpci_state state; - zdev->fh = fh; /* Give the driver a hint that the function is * already unusable. @@ -88,15 +86,12 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh) */ zpci_disable_device(zdev); zdev->state = ZPCI_FN_STATE_STANDBY; - if (!clp_get_state(zdev->fid, &state) && - state == ZPCI_FN_STATE_RESERVED) { - zpci_zdev_put(zdev); - } } static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) { struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); + enum zpci_state state; zpci_err("avail CCDF:\n"); zpci_err_hex(ccdf, sizeof(*ccdf)); @@ -113,7 +108,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) break; zdev->state = ZPCI_FN_STATE_CONFIGURED; } - zpci_configure_device(zdev, ccdf->fh); + zpci_scan_configured_device(zdev, ccdf->fh); break; case 0x0302: /* Reserved -> Standby */ if (!zdev) @@ -123,13 +118,28 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) break; case 0x0303: /* Deconfiguration requested */ if (zdev) { + /* The event may have been queued before we confirgured + * the device. 
+ */ + if (zdev->state != ZPCI_FN_STATE_CONFIGURED) + break; zdev->fh = ccdf->fh; zpci_deconfigure_device(zdev); } break; case 0x0304: /* Configured -> Standby|Reserved */ - if (zdev) - zpci_event_hard_deconfigured(zdev, ccdf->fh); + if (zdev) { + /* The event may have been queued before we confirgured + * the device.: + */ + if (zdev->state == ZPCI_FN_STATE_CONFIGURED) + zpci_event_hard_deconfigured(zdev, ccdf->fh); + /* The 0x0304 event may immediately reserve the device */ + if (!clp_get_state(zdev->fid, &state) && + state == ZPCI_FN_STATE_RESERVED) { + zpci_zdev_put(zdev); + } + } break; case 0x0306: /* 0x308 or 0x302 for multiple devices */ zpci_remove_reserved_devices(); diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index e798e55915c2..68129537e350 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -2,6 +2,8 @@ config SUPERH def_bool y select ARCH_32BIT_OFF_T + select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM && MMU + select ARCH_ENABLE_MEMORY_HOTREMOVE if SPARSEMEM && MMU select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A) select ARCH_HAS_BINFMT_FLAT if !MMU @@ -101,9 +103,6 @@ config SYS_SUPPORTS_APM_EMULATION bool select ARCH_SUSPEND_POSSIBLE -config SYS_SUPPORTS_HUGETLBFS - bool - config SYS_SUPPORTS_SMP bool @@ -175,12 +174,12 @@ config CPU_SH3 config CPU_SH4 bool + select ARCH_SUPPORTS_HUGETLBFS if MMU select CPU_HAS_INTEVT select CPU_HAS_SR_RB select CPU_HAS_FPU if !CPU_SH4AL_DSP select SH_INTC select SYS_SUPPORTS_SH_TMU - select SYS_SUPPORTS_HUGETLBFS if MMU config CPU_SH4A bool diff --git a/arch/sh/configs/edosk7705_defconfig b/arch/sh/configs/edosk7705_defconfig index ef7cc31997b1..9ee35269bee2 100644 --- a/arch/sh/configs/edosk7705_defconfig +++ b/arch/sh/configs/edosk7705_defconfig @@ -23,7 +23,6 @@ CONFIG_SH_PCLK_FREQ=31250000 # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set # CONFIG_UNIX98_PTYS is not set # CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig index 315b04a8dd2f..601d062250d1 100644 --- a/arch/sh/configs/se7206_defconfig +++ b/arch/sh/configs/se7206_defconfig @@ -71,7 +71,6 @@ CONFIG_SMC91X=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=4 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig index 99975db461d8..79f02f1c0dc8 100644 --- a/arch/sh/configs/sh2007_defconfig +++ b/arch/sh/configs/sh2007_defconfig @@ -75,7 +75,6 @@ CONFIG_INPUT_FF_MEMLESS=y # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_LEGACY_PTYS is not set diff --git a/arch/sh/configs/sh7724_generic_defconfig b/arch/sh/configs/sh7724_generic_defconfig index 2c46c0004780..cbc9389a89a8 100644 --- a/arch/sh/configs/sh7724_generic_defconfig +++ b/arch/sh/configs/sh7724_generic_defconfig @@ -18,7 +18,6 @@ CONFIG_CPU_IDLE=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=6 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sh/configs/sh7770_generic_defconfig b/arch/sh/configs/sh7770_generic_defconfig index 88193153e51b..ee2357deba0f 100644 --- a/arch/sh/configs/sh7770_generic_defconfig +++ 
b/arch/sh/configs/sh7770_generic_defconfig @@ -20,7 +20,6 @@ CONFIG_CPU_IDLE=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=6 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig index 9b885c14c400..5c725c75fcef 100644 --- a/arch/sh/configs/sh7785lcr_32bit_defconfig +++ b/arch/sh/configs/sh7785lcr_32bit_defconfig @@ -66,7 +66,6 @@ CONFIG_INPUT_FF_MEMLESS=m CONFIG_INPUT_EVDEV=y CONFIG_INPUT_EVBUG=m CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=6 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index 450b5854d7c6..3b6c7b5b7ec9 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h @@ -58,15 +58,16 @@ static inline unsigned long __ffs(unsigned long word) return result; } -#include <asm-generic/bitops/find.h> #include <asm-generic/bitops/ffs.h> #include <asm-generic/bitops/hweight.h> #include <asm-generic/bitops/lock.h> #include <asm-generic/bitops/sched.h> -#include <asm-generic/bitops/le.h> #include <asm-generic/bitops/ext2-atomic.h> #include <asm-generic/bitops/fls.h> #include <asm-generic/bitops/__fls.h> #include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/le.h> +#include <asm-generic/bitops/find.h> + #endif /* __ASM_SH_BITOPS_H */ diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 6d5c6463bc07..cf9a3ec32406 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -283,11 +283,6 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, */ #define xlate_dev_mem_ptr(p) __va(p) -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - #define ARCH_HAS_VALID_PHYS_ADDR_RANGE int valid_phys_addr_range(phys_addr_t addr, size_t size); int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 0646c5961846..295c43315bbe 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c @@ -67,7 +67,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) * Modifying code must take extra care. On an SMP machine, if * the code being modified is also being executed on another CPU * that CPU will have undefined results and possibly take a GPF. - * We use kstop_machine to stop other CPUS from exectuing code. + * We use kstop_machine to stop other CPUS from executing code. * But this does not stop NMIs from happening. We still need * to protect against that. We separate out the modification of * the code to take care of this. 
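The ftrace comment fixed above describes serializing code modification with stop_machine() (spelled "kstop_machine" in the comment) so no other CPU executes instructions while they are rewritten. A minimal sketch of that pattern, not part of this merge, with do_patch_text() and struct patch_args as invented illustration-only names and the required cache maintenance omitted:

#include <linux/stop_machine.h>
#include <linux/string.h>
#include <linux/types.h>

struct patch_args {		/* hypothetical, for illustration only */
	void *addr;
	const void *insn;
	size_t len;
};

static int do_patch_text(void *data)
{
	struct patch_args *args = data;

	/* Every other CPU is parked inside stop_machine(), so nothing else
	 * is executing the instructions being rewritten (NMIs excepted, as
	 * the comment above notes). */
	memcpy(args->addr, args->insn, args->len);
	return 0;
}

static void patch_text(void *addr, const void *insn, size_t len)
{
	struct patch_args args = { .addr = addr, .insn = insn, .len = len };

	/* NULL cpumask: run do_patch_text() on one CPU, all others held. */
	stop_machine(do_patch_text, &args, NULL);
}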
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 77aa2f802d8d..d551a9cac41e 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -136,14 +136,6 @@ config ARCH_SPARSEMEM_DEFAULT config ARCH_SELECT_MEMORY_MODEL def_bool y -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - depends on SPARSEMEM && MMU - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - depends on SPARSEMEM && MMU - config ARCH_MEMORY_PROBE def_bool y depends on MEMORY_HOTPLUG diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index 220d7bc43d2b..999ab5916e69 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c @@ -21,7 +21,7 @@ #include <asm/tlbflush.h> #include <asm/cacheflush.h> -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index 12a4fb0bd52a..18099099583e 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -122,7 +122,6 @@ CONFIG_INPUT_SPARCSPKR=y # CONFIG_SERIO_SERPORT is not set CONFIG_SERIO_PCIPS2=m CONFIG_SERIO_RAW=m -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SUNSU=y CONFIG_SERIAL_SUNSU_CONSOLE=y CONFIG_SERIAL_SUNSAB=y diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h index d3aa1a524431..e284394cb3aa 100644 --- a/arch/sparc/include/asm/ftrace.h +++ b/arch/sparc/include/asm/ftrace.h @@ -17,7 +17,7 @@ void _mcount(void); #endif #ifdef CONFIG_DYNAMIC_FTRACE -/* reloction of mcount call site is the same as the address */ +/* relocation of mcount call site is the same as the address */ static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h index 9fbfc9574432..5ffa820dcd4d 100644 --- a/arch/sparc/include/asm/io_64.h +++ b/arch/sparc/include/asm/io_64.h @@ -454,11 +454,6 @@ void sbus_set_sbus64(struct device *, int); */ #define xlate_dev_mem_ptr(p) __va(p) -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - #endif #endif /* !(__SPARC64_IO_H) */ diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index ad4b42f04988..04d8790f6c32 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -279,7 +279,7 @@ unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&p unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); } unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug index 315d368e63ad..1dfb2959c73b 100644 --- a/arch/um/Kconfig.debug +++ b/arch/um/Kconfig.debug @@ -17,6 +17,7 @@ config GCOV bool "Enable gcov support" depends on DEBUG_INFO depends on !KCOV + depends on !MODULES help This option allows developers to retrieve coverage data from a UML session. 
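The huge_pte_alloc() hunks in this merge (s390, sh and sparc above) all switch to the vma-aware prototype, so generic callers now pass the VMA alongside the mm. A simplified caller-side sketch; example_alloc_huge_pte() is an invented name, the real callers live in mm/hugetlb.c:

#include <linux/mm.h>
#include <linux/hugetlb.h>

static pte_t *example_alloc_huge_pte(struct vm_area_struct *vma,
				     unsigned long addr)
{
	struct hstate *h = hstate_vma(vma);

	/* The vma argument lets architectures and huge_pmd_share() make
	 * sharing decisions based on the mapping, not just the mm. */
	return huge_pte_alloc(vma->vm_mm, vma, addr, huge_page_size(h));
}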
diff --git a/arch/um/drivers/cow.h b/arch/um/drivers/cow.h
index 103adac691ed..9a67c017000f 100644
--- a/arch/um/drivers/cow.h
+++ b/arch/um/drivers/cow.h
@@ -24,10 +24,3 @@ extern void cow_sizes(int version, __u64 size, int sectorsize, int align,
 		       int *data_offset_out);
 
 #endif
-
-/*
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c
index d35d3f305a31..5b064d360cb7 100644
--- a/arch/um/drivers/hostaudio_kern.c
+++ b/arch/um/drivers/hostaudio_kern.c
@@ -122,13 +122,11 @@ static ssize_t hostaudio_write(struct file *file, const char __user *buffer,
 static __poll_t hostaudio_poll(struct file *file,
 			       struct poll_table_struct *wait)
 {
-	__poll_t mask = 0;
-
 #ifdef DEBUG
 	printk(KERN_DEBUG "hostaudio: poll called (unimplemented)\n");
 #endif
 
-	return mask;
+	return 0;
 }
 
 static long hostaudio_ioctl(struct file *file,
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 47a02e60898d..d27a2a9faf3e 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -8,7 +8,6 @@
  * Copyright (C) 2001 by various other people who didn't put their name here.
  */
 
-#include <linux/version.h>
 #include <linux/memblock.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index def376194dce..b9e20bbe2f75 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -302,7 +302,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 struct mm_struct;
 extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
 
-#define update_mmu_cache(vma,address,ptep) do ; while (0)
+#define update_mmu_cache(vma,address,ptep) do {} while (0)
 
 /* Encode and de-code a swap entry */
 #define __swp_type(x)			(((x).val >> 5) & 0x1f)
diff --git a/arch/um/include/uapi/asm/Kbuild b/arch/um/include/uapi/asm/Kbuild
new file mode 100644
index 000000000000..f66554cd5c45
--- /dev/null
+++ b/arch/um/include/uapi/asm/Kbuild
@@ -0,0 +1 @@
+# SPDX-License-Identifier: GPL-2.0
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 5aa882011e04..e698e0c7dbdc 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -21,7 +21,6 @@ obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
 
 obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
 obj-$(CONFIG_GPROF)	+= gprof_syms.o
-obj-$(CONFIG_GCOV)	+= gmon_syms.o
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index dacbfabf66d8..2f2a8ce92f1e 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -6,6 +6,12 @@ OUTPUT_ARCH(ELF_ARCH)
 ENTRY(_start)
 jiffies = jiffies_64;
 
+VERSION {
+  {
+    local: *;
+  };
+}
+
 SECTIONS
 {
   PROVIDE (__executable_start = START);
diff --git a/arch/um/kernel/gmon_syms.c b/arch/um/kernel/gmon_syms.c
deleted file mode 100644
index 9361a8eb9bf1..000000000000
--- a/arch/um/kernel/gmon_syms.c
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- */
-
-#include <linux/module.h>
-
-extern void __bb_init_func(void *) __attribute__((weak));
-EXPORT_SYMBOL(__bb_init_func);
-
-extern void __gcov_init(void *) __attribute__((weak));
-EXPORT_SYMBOL(__gcov_init);
-extern void __gcov_merge_add(void *, unsigned int) __attribute__((weak));
-EXPORT_SYMBOL(__gcov_merge_add);
-extern void __gcov_exit(void) __attribute__((weak));
-EXPORT_SYMBOL(__gcov_exit);
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 9019ff5905b1..8e636ce02949 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -72,8 +72,7 @@ static void __init one_page_table_init(pmd_t *pmd)
 
 		set_pmd(pmd, __pmd(_KERNPG_TABLE +
 					   (unsigned long) __pa(pte)));
-		if (pte != pte_offset_kernel(pmd, 0))
-			BUG();
+		BUG_ON(pte != pte_offset_kernel(pmd, 0));
 	}
 }
 
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 45d957d7004c..7a8e2b123e29 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -7,6 +7,12 @@ OUTPUT_ARCH(ELF_ARCH)
 ENTRY(_start)
 jiffies = jiffies_64;
 
+VERSION {
+  {
+    local: *;
+  };
+}
+
 SECTIONS
 {
   /* This must contain the right address - not quite the default ELF one.*/
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index dac15f646f79..0045e1b44190 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -60,7 +60,13 @@ config X86
 	select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
 	select ARCH_32BIT_OFF_T if X86_32
 	select ARCH_CLOCKSOURCE_INIT
+	select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION
+	select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64 || (X86_32 && HIGHMEM)
+	select ARCH_ENABLE_MEMORY_HOTREMOVE if MEMORY_HOTPLUG
+	select ARCH_ENABLE_SPLIT_PMD_PTLOCK if X86_64 || X86_PAE
+	select ARCH_ENABLE_THP_MIGRATION if X86_64 && TRANSPARENT_HUGEPAGE
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+	select ARCH_HAS_CACHE_LINE_SIZE
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
@@ -165,6 +171,7 @@ config X86
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
 	select HAVE_ARCH_USERFAULTFD_WP if X86_64 && USERFAULTFD
+	select HAVE_ARCH_USERFAULTFD_MINOR if X86_64 && USERFAULTFD
 	select HAVE_ARCH_VMAP_STACK if X86_64
 	select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
 	select HAVE_ARCH_WITHIN_STACK_FRAMES
@@ -315,9 +322,6 @@ config GENERIC_CALIBRATE_DELAY
 config ARCH_HAS_CPU_RELAX
 	def_bool y
 
-config ARCH_HAS_CACHE_LINE_SIZE
-	def_bool y
-
 config ARCH_HAS_FILTER_PGPROT
 	def_bool y
 
@@ -2428,30 +2432,13 @@ config ARCH_HAS_ADD_PAGES
 	def_bool y
 	depends on X86_64 && ARCH_ENABLE_MEMORY_HOTPLUG
 
-config ARCH_ENABLE_MEMORY_HOTPLUG
-	def_bool y
-	depends on X86_64 || (X86_32 && HIGHMEM)
-
-config ARCH_ENABLE_MEMORY_HOTREMOVE
+config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
 	def_bool y
-	depends on MEMORY_HOTPLUG
 
 config USE_PERCPU_NUMA_NODE_ID
 	def_bool y
 	depends on NUMA
 
-config ARCH_ENABLE_SPLIT_PMD_PTLOCK
-	def_bool y
-	depends on X86_64 || X86_PAE
-
-config ARCH_ENABLE_HUGEPAGE_MIGRATION
-	def_bool y
-	depends on X86_64 && HUGETLB_PAGE && MIGRATION
-
-config ARCH_ENABLE_THP_MIGRATION
-	def_bool y
-	depends on X86_64 && TRANSPARENT_HUGEPAGE
-
 menu "Power management and ACPI options"
 
 config ARCH_HIBERNATION_HEADER
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 43cbfc84153a..5e1f38179f49 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
 #endif
 
 	/* Kernel thread ? */
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(p->flags & PF_KTHREAD)) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		kthread_frame_init(frame, sp, arg);
 		return 0;
@@ -172,6 +172,23 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
 	task_user_gs(p) = get_user_gs(current_pt_regs());
 #endif
 
+	if (unlikely(p->flags & PF_IO_WORKER)) {
+		/*
+		 * An IO thread is a user space thread, but it doesn't
+		 * return to ret_after_fork().
+		 *
+		 * In order to indicate that to tools like gdb,
+		 * we reset the stack and instruction pointers.
+		 *
+		 * It does the same kernel frame setup to return to a kernel
+		 * function that a kernel thread does.
+		 */
+		childregs->sp = 0;
+		childregs->ip = 0;
+		kthread_frame_init(frame, sp, arg);
+		return 0;
+	}
+
 	/* Set a new TLS for the child thread? */
 	if (clone_flags & CLONE_SETTLS)
 		ret = set_new_tls(p, tls);
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 427980617557..156cd235659f 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -16,6 +16,8 @@
 #include <linux/pci.h>
 #include <linux/vmalloc.h>
 #include <linux/libnvdimm.h>
+#include <linux/vmstat.h>
+#include <linux/kernel.h>
 
 #include <asm/e820/api.h>
 #include <asm/processor.h>
@@ -91,6 +93,12 @@ static void split_page_count(int level)
 		return;
 
 	direct_pages_count[level]--;
+	if (system_state == SYSTEM_RUNNING) {
+		if (level == PG_LEVEL_2M)
+			count_vm_event(DIRECT_MAP_LEVEL2_SPLIT);
+		else if (level == PG_LEVEL_1G)
+			count_vm_event(DIRECT_MAP_LEVEL3_SPLIT);
+	}
 	direct_pages_count[level - 1] += PTRS_PER_PTE;
 }
 
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index bfa50e65ef6c..ae744b6a0785 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -126,7 +126,7 @@ static int __init early_root_info_init(void)
 		node = (reg >> 4) & 0x07;
 		link = (reg >> 8) & 0x03;
 
-		info = alloc_pci_root_info(min_bus, max_bus, node, link);
+		alloc_pci_root_info(min_bus, max_bus, node, link);
 	}
 
 	/*
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index 77f70b969d14..5ccb18290d71 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -21,6 +21,7 @@ obj-y += checksum_32.o syscalls_32.o
 obj-$(CONFIG_ELF_CORE) += elfcore.o
 
 subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o
+subarch-y += ../lib/cmpxchg8b_emu.o ../lib/atomic64_386_32.o
 subarch-y += ../kernel/sys_ia32.o
 
 else
diff --git a/arch/x86/um/asm/elf.h b/arch/x86/um/asm/elf.h
index c907b20d4993..dcaf3b38a9e0 100644
--- a/arch/x86/um/asm/elf.h
+++ b/arch/x86/um/asm/elf.h
@@ -212,6 +212,6 @@ extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
 extern long elf_aux_hwcap;
 #define ELF_HWCAP (elf_aux_hwcap)
 
-#define SET_PERSONALITY(ex) do ; while(0)
+#define SET_PERSONALITY(ex) do {} while(0)
 
 #endif
diff --git a/arch/x86/um/shared/sysdep/stub_32.h b/arch/x86/um/shared/sysdep/stub_32.h
index c3891c1ada26..b95db9daf0e8 100644
--- a/arch/x86/um/shared/sysdep/stub_32.h
+++ b/arch/x86/um/shared/sysdep/stub_32.h
@@ -77,7 +77,7 @@ static inline void trap_myself(void)
 	__asm("int3");
 }
 
-static void inline remap_stack_and_trap(void)
+static inline void remap_stack_and_trap(void)
 {
 	__asm__ volatile (
 		"movl %%esp,%%ebx ;"
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 19ae3e4fe4e9..54f9aa7e8457 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -59,7 +59,7 @@ int __init pci_xen_swiotlb_detect(void)
 void __init pci_xen_swiotlb_init(void)
 {
 	if (xen_swiotlb) {
-		xen_swiotlb_init(1, true /* early */);
+		xen_swiotlb_init_early();
 		dma_ops = &xen_swiotlb_dma_ops;
 
 #ifdef CONFIG_PCI
@@ -76,7 +76,7 @@ int pci_xen_swiotlb_init_late(void)
 	if (xen_swiotlb)
 		return 0;
 
-	rc = xen_swiotlb_init(1, false /* late */);
+	rc = xen_swiotlb_init();
 	if (rc)
 		return rc;
 
diff --git a/arch/xtensa/configs/xip_kc705_defconfig b/arch/xtensa/configs/xip_kc705_defconfig
index 4f1ff9531f6a..062148e17135 100644
--- a/arch/xtensa/configs/xip_kc705_defconfig
+++ b/arch/xtensa/configs/xip_kc705_defconfig
@@ -72,7 +72,6 @@ CONFIG_MARVELL_PHY=y
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
-CONFIG_DEVKMEM=y
 CONFIG_SERIAL_8250=y
 # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
 CONFIG_SERIAL_8250_CONSOLE=y