Diffstat (limited to 'arch')
198 files changed, 1459 insertions, 1462 deletions
diff --git a/arch/alpha/Kbuild b/arch/alpha/Kbuild index c2302017403a..345d79df24bb 100644 --- a/arch/alpha/Kbuild +++ b/arch/alpha/Kbuild @@ -1,3 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y += kernel/ mm/ obj-$(CONFIG_MATHEMU) += math-emu/ + +# for cleaning +subdir- += boot diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile index 52529ee42dac..881cb913e23a 100644 --- a/arch/alpha/Makefile +++ b/arch/alpha/Makefile @@ -55,9 +55,6 @@ $(boot)/vmlinux.gz: vmlinux bootimage bootpfile bootpzfile: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ -archclean: - $(Q)$(MAKE) $(clean)=$(boot) - archheaders: $(Q)$(MAKE) $(build)=arch/alpha/kernel/syscalls all diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index e805106409f7..2ae34702456c 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -129,9 +129,7 @@ dik_show_trace(unsigned long *sp, const char *loglvl) extern char _stext[], _etext[]; unsigned long tmp = *sp; sp++; - if (tmp < (unsigned long) &_stext) - continue; - if (tmp >= (unsigned long) &_etext) + if (!is_kernel_text(tmp)) continue; printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp); if (i > 40) { diff --git a/arch/arc/Kbuild b/arch/arc/Kbuild index 699d8cae9b1f..b94102fff68b 100644 --- a/arch/arc/Kbuild +++ b/arch/arc/Kbuild @@ -1,3 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-y += kernel/ obj-y += mm/ + +# for cleaning +subdir- += boot diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 8782a03f24a8..f252e7b924e9 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -112,6 +112,3 @@ uImage: $(uimage-default-y) @$(kecho) ' Image $(boot)/uImage is ready' CLEAN_FILES += $(boot)/uImage - -archclean: - $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index 3793876f42d9..8e90052f6f05 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -294,7 +294,7 @@ int elf_check_arch(const struct elf32_hdr *x) eflags = x->e_flags; if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) { pr_err("ABI mismatch - you need newer toolchain\n"); - force_sigsegv(SIGSEGV); + force_fatal_sig(SIGSEGV); return 0; } diff --git a/arch/arm/Kbuild b/arch/arm/Kbuild index 5208f7061524..b506622e7e23 100644 --- a/arch/arm/Kbuild +++ b/arch/arm/Kbuild @@ -9,3 +9,6 @@ obj-y += kernel/ mm/ common/ obj-y += probes/ obj-y += net/ obj-y += crypto/ + +# for cleaning +subdir- += boot diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 1c540157e283..a522716565c6 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -318,10 +318,6 @@ ifeq ($(CONFIG_VDSO),y) $(Q)$(MAKE) $(build)=arch/arm/vdso $@ endif -# We use MRPROPER_FILES and CLEAN_FILES now -archclean: - $(Q)$(MAKE) $(clean)=$(boot) - # My testing targets (bypasses dependencies) bp:; $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/bootpImage diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h index 24c19d63ff0a..dfeed440254a 100644 --- a/arch/arm/include/asm/syscall.h +++ b/arch/arm/include/asm/syscall.h @@ -77,16 +77,6 @@ static inline void syscall_get_arguments(struct task_struct *task, memcpy(args, ®s->ARM_r0 + 1, 5 * sizeof(args[0])); } -static inline void syscall_set_arguments(struct task_struct *task, - struct pt_regs *regs, - const unsigned long *args) -{ - regs->ARM_ORIG_r0 = args[0]; - args++; - - memcpy(®s->ARM_r0 + 1, args, 5 * sizeof(args[0])); -} - static inline int syscall_get_arch(struct task_struct *task) { /* ARM tasks don't change audit architectures on the fly. 
*/ diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c index 4b1619584b23..5ad0d6c56d56 100644 --- a/arch/arm/mm/kasan_init.c +++ b/arch/arm/mm/kasan_init.c @@ -32,7 +32,7 @@ pmd_t tmp_pmd_table[PTRS_PER_PMD] __page_aligned_bss; static __init void *kasan_alloc_block(size_t size) { return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), - MEMBLOCK_ALLOC_KASAN, NUMA_NO_NODE); + MEMBLOCK_ALLOC_NOLEAKTRACE, NUMA_NO_NODE); } static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr, diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 7f1c106b746f..7619fbffcea2 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -442,7 +442,6 @@ EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op); EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op); EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op); EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op); -EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op); EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op_raw); EXPORT_SYMBOL_GPL(HYPERVISOR_multicall); EXPORT_SYMBOL_GPL(HYPERVISOR_vm_assist); diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S index b11bba542fac..f794dac9859a 100644 --- a/arch/arm/xen/hypercall.S +++ b/arch/arm/xen/hypercall.S @@ -88,7 +88,6 @@ HYPERCALL2(hvm_op); HYPERCALL2(memory_op); HYPERCALL2(physdev_op); HYPERCALL3(vcpu_op); -HYPERCALL1(tmem_op); HYPERCALL1(platform_op_raw); HYPERCALL2(multicall); HYPERCALL2(vm_assist); diff --git a/arch/arm64/Kbuild b/arch/arm64/Kbuild index ea7ab4ca81f9..5bfbf7d79c99 100644 --- a/arch/arm64/Kbuild +++ b/arch/arm64/Kbuild @@ -4,3 +4,6 @@ obj-$(CONFIG_KVM) += kvm/ obj-$(CONFIG_XEN) += xen/ obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/ obj-$(CONFIG_CRYPTO) += crypto/ + +# for cleaning +subdir- += boot diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index c744b1e7b356..e8cfc5868aa8 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -182,13 +182,6 @@ ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS),y) endif endif - -# We use MRPROPER_FILES and CLEAN_FILES now -archclean: - $(Q)$(MAKE) $(clean)=$(boot) - $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso - $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso32 - ifeq ($(KBUILD_EXTMOD),) # We need to generate vdso-offsets.h before compiling certain files in kernel/. # In order to do that, we should use the archprepare target, but we can't since diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 84fbb52b4224..c4ba047a82d2 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -67,9 +67,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; * page table entry, taking care of 52-bit addresses. 
*/ #ifdef CONFIG_ARM64_PA_BITS_52 -#define __pte_to_phys(pte) \ - ((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36)) -#define __phys_to_pte_val(phys) (((phys) | ((phys) >> 36)) & PTE_ADDR_MASK) +static inline phys_addr_t __pte_to_phys(pte_t pte) +{ + return (pte_val(pte) & PTE_ADDR_LOW) | + ((pte_val(pte) & PTE_ADDR_HIGH) << 36); +} +static inline pteval_t __phys_to_pte_val(phys_addr_t phys) +{ + return (phys | (phys >> 36)) & PTE_ADDR_MASK; +} #else #define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_MASK) #define __phys_to_pte_val(phys) (phys) diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index 03e20895453a..4cfe9b49709b 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h @@ -73,16 +73,6 @@ static inline void syscall_get_arguments(struct task_struct *task, memcpy(args, ®s->regs[1], 5 * sizeof(args[0])); } -static inline void syscall_set_arguments(struct task_struct *task, - struct pt_regs *regs, - const unsigned long *args) -{ - regs->orig_x0 = args[0]; - args++; - - memcpy(®s->regs[1], args, 5 * sizeof(args[0])); -} - /* * We don't care about endianness (__AUDIT_ARCH_LE bit) here because * AArch64 has the same system calls both on little- and big- endian. diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 3f1490bfb938..88b3e2a21408 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -81,3 +81,6 @@ extra-y += $(head-y) vmlinux.lds ifeq ($(CONFIG_DEBUG_EFI),y) AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\"" endif + +# for cleaning +subdir- += vdso vdso32 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index ecbdff795f5e..6f3e677d88f1 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -573,15 +573,19 @@ static const struct arm64_ftr_bits ftr_raz[] = { ARM64_FTR_END, }; -#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) { \ +#define __ARM64_FTR_REG_OVERRIDE(id_str, id, table, ovr) { \ .sys_id = id, \ .reg = &(struct arm64_ftr_reg){ \ - .name = #id, \ + .name = id_str, \ .override = (ovr), \ .ftr_bits = &((table)[0]), \ }} -#define ARM64_FTR_REG(id, table) ARM64_FTR_REG_OVERRIDE(id, table, &no_override) +#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) \ + __ARM64_FTR_REG_OVERRIDE(#id, id, table, ovr) + +#define ARM64_FTR_REG(id, table) \ + __ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override) struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override; struct arm64_ftr_override __ro_after_init id_aa64pfr1_override; @@ -2864,6 +2868,7 @@ bool this_cpu_has_cap(unsigned int n) return false; } +EXPORT_SYMBOL_GPL(this_cpu_has_cap); /* * This helper function is used in a narrow window when, diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index 945e6bb326e3..700767dfd221 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -23,7 +23,7 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti # potential future proofing if we end up with internal calls to the exported # routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so # preparation in build-time C")). 
-ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \ +ldflags-y := -shared -soname=linux-vdso.so.1 --hash-style=sysv \ -Bsymbolic --build-id=sha1 -n $(btildflags-y) -T ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18 diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index c8fec493a450..6c01b63ff56d 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -102,7 +102,7 @@ VDSO_AFLAGS += -D__ASSEMBLY__ # From arm vDSO Makefile VDSO_LDFLAGS += -Bsymbolic --no-undefined -soname=linux-vdso.so.1 VDSO_LDFLAGS += -z max-page-size=4096 -z common-page-size=4096 -VDSO_LDFLAGS += -nostdlib -shared --hash-style=sysv --build-id=sha1 +VDSO_LDFLAGS += -shared --hash-style=sysv --build-id=sha1 # Borrow vdsomunge.c from the arm vDSO diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 0941180a86d3..29490be2546b 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -9,6 +9,8 @@ ifeq ($(CONFIG_KERNEL_MODE_NEON), y) obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o CFLAGS_REMOVE_xor-neon.o += -mgeneral-regs-only CFLAGS_xor-neon.o += -ffreestanding +# Enable <arm_neon.h> +CFLAGS_xor-neon.o += -isystem $(shell $(CC) -print-file-name=include) endif lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 5b996ca4d996..c12cd700598f 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -36,7 +36,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node) { void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), - MEMBLOCK_ALLOC_KASAN, node); + MEMBLOCK_ALLOC_NOLEAKTRACE, node); if (!p) panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n", __func__, PAGE_SIZE, PAGE_SIZE, node, @@ -49,7 +49,8 @@ static phys_addr_t __init kasan_alloc_raw_page(int node) { void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), - MEMBLOCK_ALLOC_KASAN, node); + MEMBLOCK_ALLOC_NOLEAKTRACE, + node); if (!p) panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n", __func__, PAGE_SIZE, PAGE_SIZE, node, @@ -309,7 +310,7 @@ void __init kasan_init(void) kasan_init_depth(); #if defined(CONFIG_KASAN_GENERIC) /* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). 
*/ - pr_info("KernelAddressSanitizer initialized\n"); + pr_info("KernelAddressSanitizer initialized (generic)\n"); #endif } diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index d77bf06d6a6d..acfae9b41cc8 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -96,7 +96,8 @@ static phys_addr_t __init early_pgtable_alloc(int shift) phys_addr_t phys; void *ptr; - phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); + phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, + MEMBLOCK_ALLOC_NOLEAKTRACE); if (!phys) panic("Failed to allocate page table page\n"); diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S index 5b09aca55108..9d01361696a1 100644 --- a/arch/arm64/xen/hypercall.S +++ b/arch/arm64/xen/hypercall.S @@ -80,7 +80,6 @@ HYPERCALL2(hvm_op); HYPERCALL2(memory_op); HYPERCALL2(physdev_op); HYPERCALL3(vcpu_op); -HYPERCALL1(tmem_op); HYPERCALL1(platform_op_raw); HYPERCALL2(multicall); HYPERCALL2(vm_assist); diff --git a/arch/csky/Kbuild b/arch/csky/Kbuild index a4e40e534e6a..4e39f7abdeb6 100644 --- a/arch/csky/Kbuild +++ b/arch/csky/Kbuild @@ -1 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only + +# for cleaning +subdir- += boot diff --git a/arch/csky/Makefile b/arch/csky/Makefile index 37f593a4bf53..866805077636 100644 --- a/arch/csky/Makefile +++ b/arch/csky/Makefile @@ -76,9 +76,6 @@ all: zImage zImage Image uImage: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ -archclean: - $(Q)$(MAKE) $(clean)=$(boot) - define archhelp echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)' echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h index f624fa3bbc22..0de5734950bf 100644 --- a/arch/csky/include/asm/syscall.h +++ b/arch/csky/include/asm/syscall.h @@ -59,15 +59,6 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, memcpy(args, ®s->a1, 5 * sizeof(args[0])); } -static inline void -syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - const unsigned long *args) -{ - regs->orig_a0 = args[0]; - args++; - memcpy(®s->a1, args, 5 * sizeof(regs->a1)); -} - static inline int syscall_get_arch(struct task_struct *task) { diff --git a/arch/h8300/Kbuild b/arch/h8300/Kbuild index b2583e7efbd1..e4703f3534cc 100644 --- a/arch/h8300/Kbuild +++ b/arch/h8300/Kbuild @@ -1,2 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y += kernel/ mm/ boot/dts/ + +# for cleaning +subdir- += boot diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile index eb4cb8f6830c..807f41e60ee4 100644 --- a/arch/h8300/Makefile +++ b/arch/h8300/Makefile @@ -34,9 +34,6 @@ libs-y += arch/$(ARCH)/lib/ boot := arch/h8300/boot -archclean: - $(Q)$(MAKE) $(clean)=$(boot) - vmlinux.srec vmlinux.bin zImage uImage.bin: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index 7e548c654a29..3b3ac3e1f272 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -67,8 +67,6 @@ vmlinux.bin: vmlinux FORCE unwcheck: vmlinux -$(Q)READELF=$(READELF) $(PYTHON3) $(srctree)/arch/ia64/scripts/unwcheck.py $< -archclean: - archheaders: $(Q)$(MAKE) $(build)=arch/ia64/kernel/syscalls all diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h index 0d23c0049301..2b02a3fb862a 100644 --- a/arch/ia64/include/asm/syscall.h +++ b/arch/ia64/include/asm/syscall.h @@ -55,21 +55,8 @@ static inline void syscall_set_return_value(struct task_struct *task, } } -extern void ia64_syscall_get_set_arguments(struct task_struct 
*task, - struct pt_regs *regs, unsigned long *args, int rw); -static inline void syscall_get_arguments(struct task_struct *task, - struct pt_regs *regs, - unsigned long *args) -{ - ia64_syscall_get_set_arguments(task, regs, args, 0); -} - -static inline void syscall_set_arguments(struct task_struct *task, - struct pt_regs *regs, - unsigned long *args) -{ - ia64_syscall_get_set_arguments(task, regs, args, 1); -} +extern void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, unsigned long *args); static inline int syscall_get_arch(struct task_struct *task) { diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index df28c7dd164f..6a1439eaa050 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -2001,17 +2001,16 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *tsk) return &user_ia64_view; } -struct syscall_get_set_args { +struct syscall_get_args { unsigned int i; unsigned int n; unsigned long *args; struct pt_regs *regs; - int rw; }; -static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data) +static void syscall_get_args_cb(struct unw_frame_info *info, void *data) { - struct syscall_get_set_args *args = data; + struct syscall_get_args *args = data; struct pt_regs *pt = args->regs; unsigned long *krbs, cfm, ndirty, nlocals, nouts; int i, count; @@ -2042,37 +2041,31 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data) /* Iterate over outs. */ for (i = 0; i < count; i++) { int j = ndirty + nlocals + i + args->i; - if (args->rw) - *ia64_rse_skip_regs(krbs, j) = args->args[i]; - else - args->args[i] = *ia64_rse_skip_regs(krbs, j); + args->args[i] = *ia64_rse_skip_regs(krbs, j); } - if (!args->rw) { - while (i < args->n) { - args->args[i] = 0; - i++; - } + while (i < args->n) { + args->args[i] = 0; + i++; } } -void ia64_syscall_get_set_arguments(struct task_struct *task, - struct pt_regs *regs, unsigned long *args, int rw) +void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, unsigned long *args) { - struct syscall_get_set_args data = { + struct syscall_get_args data = { .i = 0, .n = 6, .args = args, .regs = regs, - .rw = rw, }; if (task == current) - unw_init_running(syscall_get_set_args_cb, &data); + unw_init_running(syscall_get_args_cb, &data); else { struct unw_frame_info ufi; memset(&ufi, 0, sizeof(ufi)); unw_init_from_blocked_task(&ufi, task); - syscall_get_set_args_cb(&ufi, &data); + syscall_get_args_cb(&ufi, &data); } } diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu index 277d61a09463..0d00ef5117dc 100644 --- a/arch/m68k/Kconfig.cpu +++ b/arch/m68k/Kconfig.cpu @@ -53,17 +53,6 @@ config M68000 System-On-Chip devices (eg 68328, 68302, etc). It does not contain a paging MMU. -config MCPU32 - bool - select CPU_HAS_NO_BITFIELDS - select CPU_HAS_NO_CAS - select CPU_HAS_NO_UNALIGNED - select CPU_NO_EFFICIENT_FFS - help - The Freescale (was then Motorola) CPU32 is a CPU core that is - based on the 68020 processor. For the most part it is used in - System-On-Chip parts, and does not contain a paging MMU. - config M68020 bool "68020 support" depends on MMU diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine index 36fa0c3ef129..eeab4f3e6c19 100644 --- a/arch/m68k/Kconfig.machine +++ b/arch/m68k/Kconfig.machine @@ -203,6 +203,7 @@ config INIT_LCD config MEMORY_RESERVE int "Memory reservation (MiB)" depends on (UCSIMM || UCDIMM) + default 0 help Reserve certain memory regions on 68x328 based boards. 
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile index dd0c0ec67f67..740fc97b9c0f 100644 --- a/arch/m68k/Makefile +++ b/arch/m68k/Makefile @@ -2,9 +2,7 @@ # m68k/Makefile # # This file is included by the global makefile so that you can add your own -# architecture-specific flags and dependencies. Remember to do have actions -# for "archclean" and "archdep" for cleaning up and making dependencies for -# this architecture +# architecture-specific flags and dependencies. # # This file is subject to the terms and conditions of the GNU General Public # License. See the file "COPYING" in the main directory of this archive diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h index 7b414099e5fc..7b93e1fd8ffa 100644 --- a/arch/m68k/include/asm/bitops.h +++ b/arch/m68k/include/asm/bitops.h @@ -451,7 +451,7 @@ static inline unsigned long ffz(unsigned long word) * generic functions for those. */ #if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \ - !defined(CONFIG_M68000) && !defined(CONFIG_MCPU32) + !defined(CONFIG_M68000) static inline unsigned long __ffs(unsigned long x) { __asm__ __volatile__ ("bitrev %0; ff1 %0" diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 9718ce94cc84..99058a6da956 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -1145,7 +1145,7 @@ asmlinkage void set_esp0(unsigned long ssp) */ asmlinkage void fpsp040_die(void) { - force_sigsegv(SIGSEGV); + force_fatal_sig(SIGSEGV); } #ifdef CONFIG_M68KFPU_EMU diff --git a/arch/microblaze/Kbuild b/arch/microblaze/Kbuild index a1c597889319..077a0b8e9615 100644 --- a/arch/microblaze/Kbuild +++ b/arch/microblaze/Kbuild @@ -3,3 +3,6 @@ obj-y += kernel/ obj-y += mm/ obj-$(CONFIG_PCI) += pci/ obj-y += boot/dts/ + +# for cleaning +subdir- += boot diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile index 9adc6b6434df..e775a696aa6f 100644 --- a/arch/microblaze/Makefile +++ b/arch/microblaze/Makefile @@ -60,9 +60,6 @@ export DTB all: linux.bin -archclean: - $(Q)$(MAKE) $(clean)=$(boot) - archheaders: $(Q)$(MAKE) $(build)=arch/microblaze/kernel/syscalls all diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h index 3a6924f3cbde..5eb3f624cc59 100644 --- a/arch/microblaze/include/asm/syscall.h +++ b/arch/microblaze/include/asm/syscall.h @@ -58,28 +58,6 @@ static inline microblaze_reg_t microblaze_get_syscall_arg(struct pt_regs *regs, return ~0; } -static inline void microblaze_set_syscall_arg(struct pt_regs *regs, - unsigned int n, - unsigned long val) -{ - switch (n) { - case 5: - regs->r10 = val; - case 4: - regs->r9 = val; - case 3: - regs->r8 = val; - case 2: - regs->r7 = val; - case 1: - regs->r6 = val; - case 0: - regs->r5 = val; - default: - BUG(); - } -} - static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, unsigned long *args) @@ -91,17 +69,6 @@ static inline void syscall_get_arguments(struct task_struct *task, *args++ = microblaze_get_syscall_arg(regs, i++); } -static inline void syscall_set_arguments(struct task_struct *task, - struct pt_regs *regs, - const unsigned long *args) -{ - unsigned int i = 0; - unsigned int n = 6; - - while (n--) - microblaze_set_syscall_arg(regs, i++, *args++); -} - asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs); asmlinkage void do_syscall_trace_leave(struct pt_regs *regs); diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c index c1833b159d3b..9f73265aad4e 100644 --- a/arch/microblaze/mm/pgtable.c +++ 
b/arch/microblaze/mm/pgtable.c @@ -34,6 +34,7 @@ #include <linux/mm_types.h> #include <linux/pgtable.h> #include <linux/memblock.h> +#include <linux/kallsyms.h> #include <asm/pgalloc.h> #include <linux/io.h> @@ -171,7 +172,7 @@ void __init mapin_ram(void) for (s = 0; s < lowmem_size; s += PAGE_SIZE) { f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED | _PAGE_HWEXEC; - if ((char *) v < _stext || (char *) v >= _etext) + if (!is_kernel_text(v)) f |= _PAGE_WRENABLE; else /* On the MicroBlaze, no user access diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild index d5d6ef9bb986..9e8071f0e58f 100644 --- a/arch/mips/Kbuild +++ b/arch/mips/Kbuild @@ -25,3 +25,6 @@ obj-y += vdso/ ifdef CONFIG_KVM obj-y += kvm/ endif + +# for cleaning +subdir- += boot diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms index 2c57994b5217..30193bcf9caa 100644 --- a/arch/mips/Kbuild.platforms +++ b/arch/mips/Kbuild.platforms @@ -37,4 +37,4 @@ platform-$(CONFIG_MACH_TX49XX) += txx9/ platform-$(CONFIG_MACH_VR41XX) += vr41xx/ # include the platform specific files -include $(patsubst %, $(srctree)/arch/mips/%/Platform, $(platform-y)) +include $(patsubst %/, $(srctree)/arch/mips/%/Platform, $(platform-y)) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 86510741d49d..de60ad190057 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -292,6 +292,8 @@ config BMIPS_GENERIC select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN select HARDIRQS_SW_RESEND + select HAVE_PCI + select PCI_DRIVERS_GENERIC help Build a generic DT-based kernel image that boots on select BCM33xx cable modem chips, BCM63xx DSL chips, and BCM7xxx set-top @@ -333,6 +335,9 @@ config BCM63XX select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN select SYS_HAS_EARLY_PRINTK + select SYS_HAS_CPU_BMIPS32_3300 + select SYS_HAS_CPU_BMIPS4350 + select SYS_HAS_CPU_BMIPS4380 select SWAP_IO_SPACE select GPIOLIB select MIPS_L1_CACHE_SHIFT_4 diff --git a/arch/mips/Makefile b/arch/mips/Makefile index ea3cd080a1c7..ace7f033de07 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -8,8 +8,7 @@ # Copyright (C) 2002, 2003, 2004 Maciej W. Rozycki # # This file is included by the global makefile so that you can add your own -# architecture-specific flags and dependencies. Remember to do have actions -# for "archclean" cleaning up for this architecture. +# architecture-specific flags and dependencies. 
# archscripts: scripts_basic @@ -254,7 +253,9 @@ endif # # Board-dependent options and extra files # +ifdef need-compiler include $(srctree)/arch/mips/Kbuild.platforms +endif ifdef CONFIG_PHYSICAL_START load-y = $(CONFIG_PHYSICAL_START) @@ -426,11 +427,6 @@ endif $(Q)install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE) $(Q)install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE) -archclean: - $(Q)$(MAKE) $(clean)=arch/mips/boot - $(Q)$(MAKE) $(clean)=arch/mips/boot/compressed - $(Q)$(MAKE) $(clean)=arch/mips/boot/tools - archheaders: $(Q)$(MAKE) $(build)=arch/mips/kernel/syscalls all diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index a3da2c5d63c2..196c44fa72d9 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile @@ -171,3 +171,6 @@ $(obj)/vmlinux.itb: $(obj)/vmlinux.its $(obj)/vmlinux.bin FORCE $(obj)/vmlinux.%.itb: $(obj)/vmlinux.%.its $(obj)/vmlinux.bin.% FORCE $(call if_changed,itb-image,$<) + +# for cleaning +subdir- += compressed tools diff --git a/arch/mips/boot/compressed/.gitignore b/arch/mips/boot/compressed/.gitignore deleted file mode 100644 index d358395614c9..000000000000 --- a/arch/mips/boot/compressed/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -ashldi3.c -bswapsi.c diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index 3548b3b45269..2861a05c2e0c 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -50,19 +50,9 @@ vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY) += $(obj)/uart-alchemy.o vmlinuzobjs-$(CONFIG_ATH79) += $(obj)/uart-ath79.o endif -extra-y += uart-ath79.c -$(obj)/uart-ath79.c: $(srctree)/arch/mips/ath79/early_printk.c - $(call cmd,shipped) - vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o -extra-y += ashldi3.c -$(obj)/ashldi3.c: $(obj)/%.c: $(srctree)/lib/%.c FORCE - $(call if_changed,shipped) - -extra-y += bswapsi.c -$(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c FORCE - $(call if_changed,shipped) +vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o targets := $(notdir $(vmlinuzobjs-y)) diff --git a/arch/mips/boot/compressed/ashldi3.c b/arch/mips/boot/compressed/ashldi3.c new file mode 100644 index 000000000000..f7bf6a7aae31 --- /dev/null +++ b/arch/mips/boot/compressed/ashldi3.c @@ -0,0 +1,2 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include "../../../../lib/ashldi3.c" diff --git a/arch/mips/boot/compressed/bswapdi.c b/arch/mips/boot/compressed/bswapdi.c new file mode 100644 index 000000000000..acb28aebb025 --- /dev/null +++ b/arch/mips/boot/compressed/bswapdi.c @@ -0,0 +1,2 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include "../../lib/bswapdi.c" diff --git a/arch/mips/boot/compressed/bswapsi.c b/arch/mips/boot/compressed/bswapsi.c new file mode 100644 index 000000000000..fdb9c6476904 --- /dev/null +++ b/arch/mips/boot/compressed/bswapsi.c @@ -0,0 +1,2 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include "../../lib/bswapsi.c" diff --git a/arch/mips/boot/compressed/uart-ath79.c b/arch/mips/boot/compressed/uart-ath79.c new file mode 100644 index 000000000000..d686820921be --- /dev/null +++ b/arch/mips/boot/compressed/uart-ath79.c @@ -0,0 +1,2 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include "../../ath79/early_printk.c" diff --git a/arch/mips/configs/bmips_stb_defconfig b/arch/mips/configs/bmips_stb_defconfig index 625bd2d7e685..5956fb95c19f 100644 --- a/arch/mips/configs/bmips_stb_defconfig +++ b/arch/mips/configs/bmips_stb_defconfig @@ -1,6 +1,7 @@ # 
CONFIG_LOCALVERSION_AUTO is not set # CONFIG_SWAP is not set CONFIG_NO_HZ=y +CONFIG_HZ=1000 CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_VM_EVENT_COUNTERS is not set @@ -8,17 +9,34 @@ CONFIG_EXPERT=y CONFIG_BMIPS_GENERIC=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_HIGHMEM=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_SMP=y CONFIG_NR_CPUS=4 +CONFIG_CC_STACKPROTECTOR_STRONG=y # CONFIG_SECCOMP is not set CONFIG_MIPS_O32_FP64_SUPPORT=y +# CONFIG_RD_GZIP is not set +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +CONFIG_RD_XZ=y +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_PCI=y +CONFIG_PCI_MSI=y +CONFIG_PCIEASPM_POWERSAVE=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIE_BRCMSTB=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y CONFIG_BMIPS_CPUFREQ=y # CONFIG_BLK_DEV_BSG is not set CONFIG_NET=y @@ -32,32 +50,99 @@ CONFIG_INET=y # CONFIG_INET_DIAG is not set CONFIG_CFG80211=y CONFIG_NL80211_TESTMODE=y +CONFIG_WIRELESS=y CONFIG_MAC80211=y +CONFIG_NL80211=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_BRCMSTB_GISB_ARB=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_UDP_DIAG=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=y +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +# CONFIG_IPV6 is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_FILTER=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_XTABLES=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_NET_DSA=y +CONFIG_NET_SWITCHDEV=y +CONFIG_DMA_CMA=y +CONFIG_CMA_ALIGNMENT=12 +CONFIG_SPI=y +CONFIG_SPI_BRCMSTB=y CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y CONFIG_MTD_CFI_INTELEXT=y CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_M25P80=y +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_BRCMNAND=y +CONFIG_MTD_SPI_NOR=y +# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_GLUEBI=y +CONFIG_PROC_DEVICETREE=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 # CONFIG_BLK_DEV is not set CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_MULTI_LUN=y # CONFIG_SCSI_LOWLEVEL is not set CONFIG_NETDEVICES=y +CONFIG_VLAN_8021Q=y +CONFIG_MACVLAN=y CONFIG_BCMGENET=y CONFIG_USB_USBNET=y -# CONFIG_INPUT is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=y # CONFIG_SERIO is not set -# CONFIG_VT is not set +CONFIG_VT=y +CONFIG_VT_HW_CONSOLE_BINDING=y +# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y 
CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_BRCMSTB=y CONFIG_POWER_RESET_SYSCON=y CONFIG_POWER_SUPPLY=y # CONFIG_HWMON is not set @@ -69,22 +154,76 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_STORAGE=y CONFIG_SOC_BRCMSTB=y +CONFIG_MMC=y +CONFIG_MMC_BLOCK_MINORS=16 +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y # CONFIG_DNOTIFY is not set +CONFIG_PROC_KCORE=y +CONFIG_CIFS=y +CONFIG_JBD2_DEBUG=y CONFIG_FUSE_FS=y +CONFIG_FHANDLE=y +CONFIG_CGROUPS=y +CONFIG_CUSE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y -CONFIG_PROC_KCORE=y CONFIG_TMPFS=y +CONFIG_JFFS2_FS=y +CONFIG_UBIFS_FS=y +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y CONFIG_NFS_FS=y -CONFIG_CIFS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_ROOT_NFS=y CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y -# CONFIG_CRYPTO_HW is not set CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_INFO_REDUCED is not set CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_DEBUG_USER=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="earlycon" +# CONFIG_MIPS_CMDLINE_FROM_DTB is not set +CONFIG_MIPS_CMDLINE_DTB_EXTEND=y +# CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER is not set +# CONFIG_CRYPTO_HW is not set +CONFIG_DT_BCM974XX=y +CONFIG_FW_CFE=y +CONFIG_ATA=y +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_AHCI_BRCMSTB=y +CONFIG_GENERIC_PHY=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_PHY_BRCM_USB=y +CONFIG_PHY_BRCM_SATA=y +CONFIG_PM_RUNTIME=y +CONFIG_PM_DEBUG=y +CONFIG_SYSVIPC=y +CONFIG_FUNCTION_GRAPH_TRACER=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_IRQSOFF_TRACER=y +CONFIG_SCHED_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y +CONFIG_STACK_TRACER=y diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c index eaad0ed4b523..a8a30bb1dee8 100644 --- a/arch/mips/dec/setup.c +++ b/arch/mips/dec/setup.c @@ -117,21 +117,21 @@ static void __init dec_be_init(void) { switch (mips_machtype) { case MACH_DS23100: /* DS2100/DS3100 Pmin/Pmax */ - board_be_handler = dec_kn01_be_handler; + mips_set_be_handler(dec_kn01_be_handler); busirq_handler = dec_kn01_be_interrupt; busirq_flags |= IRQF_SHARED; dec_kn01_be_init(); break; case MACH_DS5000_1XX: /* DS5000/1xx 3min */ case MACH_DS5000_XX: /* DS5000/xx Maxine */ - board_be_handler = dec_kn02xa_be_handler; + mips_set_be_handler(dec_kn02xa_be_handler); busirq_handler = dec_kn02xa_be_interrupt; dec_kn02xa_be_init(); break; case MACH_DS5000_200: /* DS5000/200 3max */ case MACH_DS5000_2X0: /* DS5000/240 3max+ */ case MACH_DS5900: /* DS5900 bigmax */ - board_be_handler = dec_ecc_be_handler; + mips_set_be_handler(dec_ecc_be_handler); busirq_handler = dec_ecc_be_interrupt; dec_ecc_be_init(); break; diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h index b710e76c9c65..15cde638b407 100644 --- a/arch/mips/include/asm/traps.h +++ b/arch/mips/include/asm/traps.h @@ -15,7 +15,7 @@ #define MIPS_BE_FATAL 2 /* treat as an unrecoverable error */ extern void (*board_be_init)(void); -extern int (*board_be_handler)(struct pt_regs *regs, int is_fixup); +void mips_set_be_handler(int (*handler)(struct pt_regs *reg, int is_fixup)); extern void 
(*board_nmi_handler_setup)(void); extern void (*board_ejtag_handler_setup)(void); diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S index 12e58053544f..cbf6db98cfb3 100644 --- a/arch/mips/kernel/r2300_fpu.S +++ b/arch/mips/kernel/r2300_fpu.S @@ -29,8 +29,8 @@ #define EX2(a,b) \ 9: a,##b; \ .section __ex_table,"a"; \ - PTR 9b,bad_stack; \ - PTR 9b+4,bad_stack; \ + PTR 9b,fault; \ + PTR 9b+4,fault; \ .previous .set mips1 diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 2afa3eef486a..5512cd586e6e 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -240,12 +240,3 @@ SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op) { return -ENOSYS; } - -/* - * If we ever come here the user sp is bad. Zap the process right away. - * Due to the bad stack signaling wouldn't work. - */ -asmlinkage void bad_stack(void) -{ - do_exit(SIGSEGV); -} diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 6f07362de5ce..d26b0fb8ea06 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -103,13 +103,19 @@ extern asmlinkage void handle_reserved(void); extern void tlb_do_page_fault_0(void); void (*board_be_init)(void); -int (*board_be_handler)(struct pt_regs *regs, int is_fixup); +static int (*board_be_handler)(struct pt_regs *regs, int is_fixup); void (*board_nmi_handler_setup)(void); void (*board_ejtag_handler_setup)(void); void (*board_bind_eic_interrupt)(int irq, int regset); void (*board_ebase_setup)(void); void(*board_cache_error_setup)(void); +void mips_set_be_handler(int (*handler)(struct pt_regs *regs, int is_fixup)) +{ + board_be_handler = handler; +} +EXPORT_SYMBOL_GPL(mips_set_be_handler); + static void show_raw_backtrace(unsigned long reg29, const char *loglvl, bool user) { diff --git a/arch/mips/sgi-ip22/ip22-berr.c b/arch/mips/sgi-ip22/ip22-berr.c index dc0110a607a5..afe8a61078e4 100644 --- a/arch/mips/sgi-ip22/ip22-berr.c +++ b/arch/mips/sgi-ip22/ip22-berr.c @@ -112,5 +112,5 @@ static int ip22_be_handler(struct pt_regs *regs, int is_fixup) void __init ip22_be_init(void) { - board_be_handler = ip22_be_handler; + mips_set_be_handler(ip22_be_handler); } diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c index c61362d9ea95..16ca470deb80 100644 --- a/arch/mips/sgi-ip22/ip28-berr.c +++ b/arch/mips/sgi-ip22/ip28-berr.c @@ -468,7 +468,7 @@ static int ip28_be_handler(struct pt_regs *regs, int is_fixup) void __init ip22_be_init(void) { - board_be_handler = ip28_be_handler; + mips_set_be_handler(ip28_be_handler); } int ip28_show_be_info(struct seq_file *m) diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c index 5a38ae6bdfa9..923a63a51cda 100644 --- a/arch/mips/sgi-ip27/ip27-berr.c +++ b/arch/mips/sgi-ip27/ip27-berr.c @@ -85,7 +85,7 @@ void __init ip27_be_init(void) int cpu = LOCAL_HUB_L(PI_CPU_NUM); int cpuoff = cpu << 8; - board_be_handler = ip27_be_handler; + mips_set_be_handler(ip27_be_handler); LOCAL_HUB_S(PI_ERR_INT_PEND, cpu ? 
PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A); diff --git a/arch/mips/sgi-ip32/ip32-berr.c b/arch/mips/sgi-ip32/ip32-berr.c index c860f95ab7ed..478b63b4c808 100644 --- a/arch/mips/sgi-ip32/ip32-berr.c +++ b/arch/mips/sgi-ip32/ip32-berr.c @@ -34,5 +34,5 @@ static int ip32_be_handler(struct pt_regs *regs, int is_fixup) void __init ip32_be_init(void) { - board_be_handler = ip32_be_handler; + mips_set_be_handler(ip32_be_handler); } diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index f07b15dd1c1a..72a31eeeebba 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c @@ -122,7 +122,7 @@ void __init plat_mem_setup(void) #error invalid SiByte board configuration #endif - board_be_handler = swarm_be_handler; + mips_set_be_handler(swarm_be_handler); if (xicor_probe()) swarm_rtc_type = RTC_XICOR; diff --git a/arch/mips/txx9/generic/setup_tx4927.c b/arch/mips/txx9/generic/setup_tx4927.c index 46e9c4101386..63f9725b2eb0 100644 --- a/arch/mips/txx9/generic/setup_tx4927.c +++ b/arch/mips/txx9/generic/setup_tx4927.c @@ -80,7 +80,7 @@ static int tx4927_be_handler(struct pt_regs *regs, int is_fixup) } static void __init tx4927_be_init(void) { - board_be_handler = tx4927_be_handler; + mips_set_be_handler(tx4927_be_handler); } static struct resource tx4927_sdram_resource[4]; diff --git a/arch/mips/txx9/generic/setup_tx4938.c b/arch/mips/txx9/generic/setup_tx4938.c index 17395d5d15ca..ba646548c5f6 100644 --- a/arch/mips/txx9/generic/setup_tx4938.c +++ b/arch/mips/txx9/generic/setup_tx4938.c @@ -82,7 +82,7 @@ static int tx4938_be_handler(struct pt_regs *regs, int is_fixup) } static void __init tx4938_be_init(void) { - board_be_handler = tx4938_be_handler; + mips_set_be_handler(tx4938_be_handler); } static struct resource tx4938_sdram_resource[4]; diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c index bf8a3cdababf..f5f59b7401a3 100644 --- a/arch/mips/txx9/generic/setup_tx4939.c +++ b/arch/mips/txx9/generic/setup_tx4939.c @@ -86,7 +86,7 @@ static int tx4939_be_handler(struct pt_regs *regs, int is_fixup) } static void __init tx4939_be_init(void) { - board_be_handler = tx4939_be_handler; + mips_set_be_handler(tx4939_be_handler); } static struct resource tx4939_sdram_resource[4]; diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 1b2ea34c3d3b..d65f55f67e19 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -57,7 +57,7 @@ endif # VDSO linker flags. 
ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ - $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \ + $(filter -E%,$(KBUILD_CFLAGS)) -shared \ -G 0 --eh-frame-hdr --hash-style=sysv --build-id=sha1 -T CFLAGS_REMOVE_vdso.o = $(CC_FLAGS_FTRACE) diff --git a/arch/nds32/Kbuild b/arch/nds32/Kbuild index a4e40e534e6a..4e39f7abdeb6 100644 --- a/arch/nds32/Kbuild +++ b/arch/nds32/Kbuild @@ -1 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only + +# for cleaning +subdir- += boot diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile index ccdca7142020..797ad9b450af 100644 --- a/arch/nds32/Makefile +++ b/arch/nds32/Makefile @@ -9,6 +9,8 @@ endif # Avoid generating FPU instructions arch-y += -mno-ext-fpu-sp -mno-ext-fpu-dp -mfloat-abi=soft +# Enable <nds32_intrinsic.h> +KBUILD_CFLAGS += -isystem $(shell $(CC) -print-file-name=include) KBUILD_CFLAGS += $(call cc-option, -mno-sched-prolog-epilog) KBUILD_CFLAGS += -mcmodel=large @@ -62,9 +64,6 @@ prepare: vdso_prepare vdso_prepare: prepare0 $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h -archclean: - $(Q)$(MAKE) $(clean)=$(boot) - define archhelp echo ' Image - kernel image (arch/$(ARCH)/boot/Image)' endef diff --git a/arch/nds32/include/asm/syscall.h b/arch/nds32/include/asm/syscall.h index 7b5180d78e20..90aa56c94af1 100644 --- a/arch/nds32/include/asm/syscall.h +++ b/arch/nds32/include/asm/syscall.h @@ -132,28 +132,6 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, memcpy(args, ®s->uregs[0] + 1, 5 * sizeof(args[0])); } -/** - * syscall_set_arguments - change system call parameter value - * @task: task of interest, must be in system call entry tracing - * @regs: task_pt_regs() of @task - * @args: array of argument values to store - * - * Changes 6 arguments to the system call. The first argument gets value - * @args[0], and so on. - * - * It's only valid to call this when @task is stopped for tracing on - * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. - */ -static inline void -syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - const unsigned long *args) -{ - regs->orig_r0 = args[0]; - args++; - - memcpy(®s->uregs[0] + 1, args, 5 * sizeof(args[0])); -} - static inline int syscall_get_arch(struct task_struct *task) { diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c index f06421c645af..ca75d475eda4 100644 --- a/arch/nds32/kernel/traps.c +++ b/arch/nds32/kernel/traps.c @@ -118,7 +118,7 @@ DEFINE_SPINLOCK(die_lock); /* * This function is protected against re-entrancy. 
*/ -void die(const char *str, struct pt_regs *regs, int err) +void __noreturn die(const char *str, struct pt_regs *regs, int err) { struct task_struct *tsk = current; static int die_counter; diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c index f02524eb6d56..1d139b117168 100644 --- a/arch/nds32/mm/fault.c +++ b/arch/nds32/mm/fault.c @@ -13,7 +13,7 @@ #include <asm/tlbflush.h> -extern void die(const char *str, struct pt_regs *regs, long err); +extern void __noreturn die(const char *str, struct pt_regs *regs, long err); /* * This is useful to dump out the page tables associated with @@ -299,10 +299,6 @@ no_context: show_pte(mm, addr); die("Oops", regs, error_code); - bust_spinlocks(0); - do_exit(SIGKILL); - - return; /* * We ran out of memory, or some other thing happened to us that made diff --git a/arch/nios2/Kbuild b/arch/nios2/Kbuild index a4e40e534e6a..4e39f7abdeb6 100644 --- a/arch/nios2/Kbuild +++ b/arch/nios2/Kbuild @@ -1 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only + +# for cleaning +subdir- += boot diff --git a/arch/nios2/Makefile b/arch/nios2/Makefile index 52c03e60b114..02d678559066 100644 --- a/arch/nios2/Makefile +++ b/arch/nios2/Makefile @@ -8,8 +8,7 @@ # Written by Fredrik Markstrom # # This file is included by the global makefile so that you can add your own -# architecture-specific flags and dependencies. Remember to do have actions -# for "archclean" cleaning up for this architecture. +# architecture-specific flags and dependencies. # # Nios2 port by Wind River Systems Inc trough: # fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com @@ -53,14 +52,12 @@ core-y += $(nios2-boot)/dts/ all: vmImage -archclean: - $(Q)$(MAKE) $(clean)=$(nios2-boot) - $(BOOT_TARGETS): vmlinux $(Q)$(MAKE) $(build)=$(nios2-boot) $(nios2-boot)/$@ install: - $(Q)$(MAKE) $(build)=$(nios2-boot) BOOTIMAGE=$(KBUILD_IMAGE) install + sh $(srctree)/$(nios2-boot)/install.sh $(KERNELRELEASE) \ + $(KBUILD_IMAGE) System.map "$(INSTALL_PATH)" define archhelp echo '* vmImage - Kernel-only image for U-Boot ($(KBUILD_IMAGE))' diff --git a/arch/nios2/boot/Makefile b/arch/nios2/boot/Makefile index 37dfc7e584bc..8c3ad76602f3 100644 --- a/arch/nios2/boot/Makefile +++ b/arch/nios2/boot/Makefile @@ -30,6 +30,3 @@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE $(obj)/compressed/vmlinux: $(obj)/vmlinux.gz FORCE $(Q)$(MAKE) $(build)=$(obj)/compressed $@ - -install: - sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)" diff --git a/arch/nios2/include/asm/syscall.h b/arch/nios2/include/asm/syscall.h index 526449edd768..fff52205fb65 100644 --- a/arch/nios2/include/asm/syscall.h +++ b/arch/nios2/include/asm/syscall.h @@ -58,17 +58,6 @@ static inline void syscall_get_arguments(struct task_struct *task, *args = regs->r9; } -static inline void syscall_set_arguments(struct task_struct *task, - struct pt_regs *regs, const unsigned long *args) -{ - regs->r4 = *args++; - regs->r5 = *args++; - regs->r6 = *args++; - regs->r7 = *args++; - regs->r8 = *args++; - regs->r9 = *args; -} - static inline int syscall_get_arch(struct task_struct *task) { return AUDIT_ARCH_NIOS2; diff --git a/arch/openrisc/Kbuild b/arch/openrisc/Kbuild index 4234b4c03e72..b0b0f2b03f87 100644 --- a/arch/openrisc/Kbuild +++ b/arch/openrisc/Kbuild @@ -1,3 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-y += lib/ kernel/ mm/ obj-y += boot/dts/ + +# for cleaning +subdir- += boot diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile index c52de526e518..760b734fb822 100644 --- a/arch/openrisc/Makefile +++ 
b/arch/openrisc/Makefile @@ -1,9 +1,7 @@ # BK Id: %F% %I% %G% %U% %#% # # This file is included by the global makefile so that you can add your own -# architecture-specific flags and dependencies. Remember to do have actions -# for "archclean" and "archdep" for cleaning up and making dependencies for -# this architecture +# architecture-specific flags and dependencies. # # This file is subject to the terms and conditions of the GNU General Public # License. See the file "COPYING" in the main directory of this archive @@ -48,6 +46,3 @@ PHONY += vmlinux.bin vmlinux.bin: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ - -archclean: - $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h index e6383be2a195..903ed882bdec 100644 --- a/arch/openrisc/include/asm/syscall.h +++ b/arch/openrisc/include/asm/syscall.h @@ -57,13 +57,6 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, memcpy(args, ®s->gpr[3], 6 * sizeof(args[0])); } -static inline void -syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - const unsigned long *args) -{ - memcpy(®s->gpr[3], args, 6 * sizeof(args[0])); -} - static inline int syscall_get_arch(struct task_struct *task) { return AUDIT_ARCH_OPENRISC; diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index 1b16d97e7da7..a82b2caaa560 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c @@ -33,7 +33,7 @@ page_set_nocache(pte_t *pte, unsigned long addr, * Flush the page out of the TLB so that the new page flags get * picked up next time there's an access */ - flush_tlb_page(NULL, addr); + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); /* Flush page out of dcache */ for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size) @@ -56,7 +56,7 @@ page_clear_nocache(pte_t *pte, unsigned long addr, * Flush the page out of the TLB so that the new page flags get * picked up next time there's an access */ - flush_tlb_page(NULL, addr); + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); return 0; } diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c index 1ebcff271096..99516c9191c7 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c @@ -28,8 +28,6 @@ #include <asm/ucontext.h> #include <linux/uaccess.h> -#define DEBUG_SIG 0 - struct rt_sigframe { struct siginfo info; struct ucontext uc; diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c index da21e22bf4da..27041db2c8b0 100644 --- a/arch/openrisc/kernel/smp.c +++ b/arch/openrisc/kernel/smp.c @@ -268,7 +268,7 @@ static inline void ipi_flush_tlb_range(void *info) local_flush_tlb_range(NULL, fd->addr1, fd->addr2); } -static void smp_flush_tlb_range(struct cpumask *cmask, unsigned long start, +static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start, unsigned long end) { unsigned int cpuid; @@ -316,7 +316,9 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - smp_flush_tlb_range(mm_cpumask(vma->vm_mm), start, end); + const struct cpumask *cmask = vma ? 
mm_cpumask(vma->vm_mm) + : cpu_online_mask; + smp_flush_tlb_range(cmask, start, end); } /* Instruction cache invalidate - performed on each cpu */ diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c index b82866061958..a6e69386f82a 100644 --- a/arch/openrisc/kernel/time.c +++ b/arch/openrisc/kernel/time.c @@ -127,7 +127,7 @@ irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs) return IRQ_HANDLED; } -/** +/* * Clocksource: Based on OpenRISC timer/counter * * This sets up the OpenRISC Tick Timer as a clock source. The tick timer diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index aa1e709405ac..0898cb159fac 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -197,7 +197,7 @@ void nommu_dump_state(struct pt_regs *regs, } /* This is normally the 'Oops' routine */ -void die(const char *str, struct pt_regs *regs, long err) +void __noreturn die(const char *str, struct pt_regs *regs, long err) { console_verbose(); diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index c730d1a51686..f0fa6394a58e 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -32,7 +32,7 @@ unsigned long pte_errors; /* updated by do_page_fault() */ */ volatile pgd_t *current_pgd[NR_CPUS]; -extern void die(char *, struct pt_regs *, long); +extern void __noreturn die(char *, struct pt_regs *, long); /* * This routine handles page faults. It determines the address, @@ -248,8 +248,6 @@ no_context: die("Oops", regs, write_acc); - do_exit(SIGKILL); - /* * We ran out of memory, or some other thing happened to us that made * us unable to handle the page fault gracefully. diff --git a/arch/parisc/Kbuild b/arch/parisc/Kbuild index 3c068b700a81..a6d3b280ba0c 100644 --- a/arch/parisc/Kbuild +++ b/arch/parisc/Kbuild @@ -1,2 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y += mm/ kernel/ math-emu/ + +# for cleaning +subdir- += boot diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index fcde3ffa0221..8db4af4879d0 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -2,9 +2,7 @@ # parisc/Makefile # # This file is included by the global makefile so that you can add your own -# architecture-specific flags and dependencies. Remember to do have actions -# for "archclean" and "archdep" for cleaning up and making dependencies for -# this architecture +# architecture-specific flags and dependencies. # # This file is subject to the terms and conditions of the GNU General Public # License. See the file "COPYING" in the main directory of this archive @@ -181,8 +179,5 @@ define archhelp @echo ' zinstall - Install compressed vmlinuz kernel' endef -archclean: - $(Q)$(MAKE) $(clean)=$(boot) - archheaders: $(Q)$(MAKE) $(build)=arch/parisc/kernel/syscalls all diff --git a/arch/powerpc/Kbuild b/arch/powerpc/Kbuild index 5e2f9eaa3ee7..22cd0d55a892 100644 --- a/arch/powerpc/Kbuild +++ b/arch/powerpc/Kbuild @@ -16,3 +16,6 @@ obj-$(CONFIG_KVM) += kvm/ obj-$(CONFIG_PERF_EVENTS) += perf/ obj-$(CONFIG_KEXEC_CORE) += kexec/ obj-$(CONFIG_KEXEC_FILE) += purgatory/ + +# for cleaning +subdir- += boot diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 54cad1faa5d0..e02568f17334 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -1,7 +1,5 @@ # This file is included by the global makefile so that you can add your own -# architecture-specific flags and dependencies. Remember to do have actions -# for "archclean" and "archdep" for cleaning up and making dependencies for -# this architecture. 
+# architecture-specific flags and dependencies. # # This file is subject to the terms and conditions of the GNU General Public # License. See the file "COPYING" in the main directory of this archive @@ -411,9 +409,6 @@ install: sh -x $(srctree)/$(boot)/install.sh "$(KERNELRELEASE)" vmlinux \ System.map "$(INSTALL_PATH)" -archclean: - $(Q)$(MAKE) $(clean)=$(boot) - ifeq ($(KBUILD_EXTMOD),) # We need to generate vdso-offsets.h before compiling certain files in kernel/. # In order to do that, we should use the archprepare target, but we can't since diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index c60ebd04b2ed..52d05b465e3e 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h @@ -103,16 +103,6 @@ static inline void syscall_get_arguments(struct task_struct *task, } } -static inline void syscall_set_arguments(struct task_struct *task, - struct pt_regs *regs, - const unsigned long *args) -{ - memcpy(®s->gpr[3], args, 6 * sizeof(args[0])); - - /* Also copy the first argument into orig_gpr3 */ - regs->orig_gpr3 = args[0]; -} - static inline int syscall_get_arch(struct task_struct *task) { if (is_32bit_task()) diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 38c3eae40c14..00a9c9cd6d42 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -1062,8 +1062,10 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, * or if another thread unmaps the region containing the context. * We kill the task with a SIGSEGV in this situation. */ - if (do_setcontext(new_ctx, regs, 0)) - do_exit(SIGSEGV); + if (do_setcontext(new_ctx, regs, 0)) { + force_fatal_sig(SIGSEGV); + return -EFAULT; + } set_thread_flag(TIF_RESTOREALL); return 0; diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 9f471b4a11e3..ef518535d436 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -703,15 +703,18 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, * We kill the task with a SIGSEGV in this situation. 
*/ - if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) - do_exit(SIGSEGV); + if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) { + force_fatal_sig(SIGSEGV); + return -EFAULT; + } set_current_blocked(&set); if (!user_read_access_begin(new_ctx, ctx_size)) return -EFAULT; if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) { user_read_access_end(); - do_exit(SIGSEGV); + force_fatal_sig(SIGSEGV); + return -EFAULT; } user_read_access_end(); diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index a7061ee3b157..28c436df9935 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -560,7 +560,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma, gpa, 0, page_shift); if (ret == U_SUCCESS) - *mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED; + *mig.dst = migrate_pfn(pfn); else { unlock_page(dpage); __free_page(dpage); @@ -774,7 +774,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma, } } - *mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; + *mig.dst = migrate_pfn(page_to_pfn(dpage)); migrate_vma_pages(&mig); out_finalize: migrate_vma_finalize(&mig); diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 99a7c9132422..9e5d0f413b71 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -65,5 +65,7 @@ obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o obj-$(CONFIG_ALTIVEC) += xor_vmx.o xor_vmx_glue.o CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec) +# Enable <altivec.h> +CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include) obj-$(CONFIG_PPC64) += $(obj64-y) diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index fde1ed445ca4..906e4e4328b2 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -33,8 +33,6 @@ #include <mm/mmu_decl.h> -extern char etext[], _stext[], _sinittext[], _einittext[]; - static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data; notrace void __init early_ioremap_init(void) @@ -104,14 +102,13 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) { unsigned long v, s; phys_addr_t p; - int ktext; + bool ktext; s = offset; v = PAGE_OFFSET + s; p = memstart_addr + s; for (; s < top; s += PAGE_SIZE) { - ktext = ((char *)v >= _stext && (char *)v < etext) || - ((char *)v >= _sinittext && (char *)v < _einittext); + ktext = core_kernel_text(v); map_kernel_page(v, p, ktext ? 
PAGE_KERNEL_TEXT : PAGE_KERNEL); v += PAGE_SIZE; p += PAGE_SIZE; diff --git a/arch/powerpc/platforms/powernv/ocxl.c b/arch/powerpc/platforms/powernv/ocxl.c index 9105efcf242a..28b009b46464 100644 --- a/arch/powerpc/platforms/powernv/ocxl.c +++ b/arch/powerpc/platforms/powernv/ocxl.c @@ -107,7 +107,8 @@ static int get_max_afu_index(struct pci_dev *dev, int *afu_idx) int pos; u32 val; - pos = find_dvsec_from_pos(dev, OCXL_DVSEC_FUNC_ID, 0); + pos = pci_find_dvsec_capability(dev, PCI_VENDOR_ID_IBM, + OCXL_DVSEC_FUNC_ID); if (!pos) return -ESRCH; diff --git a/arch/riscv/Kbuild b/arch/riscv/Kbuild index 4614c01ba5b3..fb3397223d52 100644 --- a/arch/riscv/Kbuild +++ b/arch/riscv/Kbuild @@ -2,3 +2,6 @@ obj-y += kernel/ mm/ net/ obj-$(CONFIG_BUILTIN_DTB) += boot/dts/ + +# for cleaning +subdir- += boot diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index a34c531be4e7..821252b65f89 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -62,6 +62,7 @@ config RISCV select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL if MMU && 64BIT + select GENERIC_VDSO_TIME_NS if HAVE_GENERIC_VDSO select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 58c1a28e20bb..5927c94302b8 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -1,7 +1,5 @@ # This file is included by the global makefile so that you can add your own -# architecture-specific flags and dependencies. Remember to do have actions -# for "archclean" and "archdep" for cleaning up and making dependencies for -# this architecture +# architecture-specific flags and dependencies. # # This file is subject to the terms and conditions of the GNU General Public # License. 
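For reference on the __mapin_ram_chunk() change above: core_kernel_text() covers both the core text range (_stext.._etext) and the still-present init text range (_sinittext.._einittext) that the removed open-coded comparison checked by hand. The wrapper below is a hypothetical sketch, not part of the patch.

#include <linux/kernel.h>       /* core_kernel_text() */
#include <linux/types.h>

/* Hypothetical helper: should this linear-map page be mapped executable? */
static bool demo_map_as_text(unsigned long vaddr)
{
        /*
         * Equivalent to the removed check against _stext/_etext and
         * _sinittext/_einittext: core_kernel_text() is true for core
         * kernel text and for init text that has not been freed yet.
         */
        return core_kernel_text(vaddr);
}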
See the file "COPYING" in the main directory of this archive @@ -139,5 +137,12 @@ install zinstall: $(CONFIG_SHELL) $(srctree)/$(boot)/install.sh $(KERNELRELEASE) \ $(boot)/$(install-image) System.map "$(INSTALL_PATH)" -archclean: - $(Q)$(MAKE) $(clean)=$(boot) +PHONY += rv32_randconfig +rv32_randconfig: + $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/riscv/configs/32-bit.config \ + -f $(srctree)/Makefile randconfig + +PHONY += rv64_randconfig +rv64_randconfig: + $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/riscv/configs/64-bit.config \ + -f $(srctree)/Makefile randconfig diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts index b254c60589a1..fc1e5869df1b 100644 --- a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts +++ b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts @@ -9,10 +9,8 @@ #define RTCCLK_FREQ 1000000 / { - #address-cells = <2>; - #size-cells = <2>; model = "Microchip PolarFire-SoC Icicle Kit"; - compatible = "microchip,mpfs-icicle-kit"; + compatible = "microchip,mpfs-icicle-kit", "microchip,mpfs"; aliases { ethernet0 = &emac1; @@ -35,9 +33,6 @@ reg = <0x0 0x80000000 0x0 0x40000000>; clocks = <&clkcfg 26>; }; - - soc { - }; }; &serial0 { @@ -56,8 +51,17 @@ status = "okay"; }; -&sdcard { +&mmc { status = "okay"; + + bus-width = <4>; + disable-wp; + cap-sd-highspeed; + card-detect-delay = <200>; + sd-uhs-sdr12; + sd-uhs-sdr25; + sd-uhs-sdr50; + sd-uhs-sdr104; }; &emac0 { diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi index 9d2fbbc1f777..c9f6d205d2ba 100644 --- a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi +++ b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi @@ -6,8 +6,8 @@ / { #address-cells = <2>; #size-cells = <2>; - model = "Microchip MPFS Icicle Kit"; - compatible = "microchip,mpfs-icicle-kit"; + model = "Microchip PolarFire SoC"; + compatible = "microchip,mpfs"; chosen { }; @@ -161,7 +161,7 @@ }; clint@2000000 { - compatible = "sifive,clint0"; + compatible = "sifive,fu540-c000-clint", "sifive,clint0"; reg = <0x0 0x2000000 0x0 0xC000>; interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7 &cpu1_intc 3 &cpu1_intc 7 @@ -172,7 +172,7 @@ plic: interrupt-controller@c000000 { #interrupt-cells = <1>; - compatible = "sifive,plic-1.0.0"; + compatible = "sifive,fu540-c000-plic", "sifive,plic-1.0.0"; reg = <0x0 0xc000000 0x0 0x4000000>; riscv,ndev = <186>; interrupt-controller; @@ -262,39 +262,13 @@ status = "disabled"; }; - emmc: mmc@20008000 { - compatible = "cdns,sd4hc"; + /* Common node entry for emmc/sd */ + mmc: mmc@20008000 { + compatible = "microchip,mpfs-sd4hc", "cdns,sd4hc"; reg = <0x0 0x20008000 0x0 0x1000>; interrupt-parent = <&plic>; interrupts = <88 89>; - pinctrl-names = "default"; clocks = <&clkcfg 6>; - bus-width = <4>; - cap-mmc-highspeed; - mmc-ddr-3_3v; - max-frequency = <200000000>; - non-removable; - no-sd; - no-sdio; - voltage-ranges = <3300 3300>; - status = "disabled"; - }; - - sdcard: sdhc@20008000 { - compatible = "cdns,sd4hc"; - reg = <0x0 0x20008000 0x0 0x1000>; - interrupt-parent = <&plic>; - interrupts = <88>; - pinctrl-names = "default"; - clocks = <&clkcfg 6>; - bus-width = <4>; - disable-wp; - cap-sd-highspeed; - card-detect-delay = <200>; - sd-uhs-sdr12; - sd-uhs-sdr25; - sd-uhs-sdr50; - sd-uhs-sdr104; max-frequency = <200000000>; status = "disabled"; }; diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi index 
7db861053483..0655b5c4201d 100644 --- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi +++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi @@ -141,7 +141,7 @@ ranges; plic0: interrupt-controller@c000000 { #interrupt-cells = <1>; - compatible = "sifive,plic-1.0.0"; + compatible = "sifive,fu540-c000-plic", "sifive,plic-1.0.0"; reg = <0x0 0xc000000 0x0 0x4000000>; riscv,ndev = <53>; interrupt-controller; diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts index 60846e88ae4b..ba304d4c455c 100644 --- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts +++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts @@ -8,10 +8,9 @@ #define RTCCLK_FREQ 1000000 / { - #address-cells = <2>; - #size-cells = <2>; model = "SiFive HiFive Unleashed A00"; - compatible = "sifive,hifive-unleashed-a00", "sifive,fu540-c000"; + compatible = "sifive,hifive-unleashed-a00", "sifive,fu540-c000", + "sifive,fu540"; chosen { stdout-path = "serial0"; @@ -26,9 +25,6 @@ reg = <0x0 0x80000000 0x2 0x00000000>; }; - soc { - }; - hfclk: hfclk { #clock-cells = <0>; compatible = "fixed-clock"; @@ -63,7 +59,7 @@ &qspi0 { status = "okay"; flash@0 { - compatible = "issi,is25wp256", "jedec,spi-nor"; + compatible = "jedec,spi-nor"; reg = <0>; spi-max-frequency = <50000000>; m25p,fast-read; diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts index 2e4ea84f27e7..4f66919215f6 100644 --- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts +++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts @@ -8,8 +8,6 @@ #define RTCCLK_FREQ 1000000 / { - #address-cells = <2>; - #size-cells = <2>; model = "SiFive HiFive Unmatched A00"; compatible = "sifive,hifive-unmatched-a00", "sifive,fu740-c000", "sifive,fu740"; @@ -27,9 +25,6 @@ reg = <0x0 0x80000000 0x4 0x00000000>; }; - soc { - }; - hfclk: hfclk { #clock-cells = <0>; compatible = "fixed-clock"; @@ -211,7 +206,7 @@ &qspi0 { status = "okay"; flash@0 { - compatible = "issi,is25wp256", "jedec,spi-nor"; + compatible = "jedec,spi-nor"; reg = <0>; spi-max-frequency = <50000000>; m25p,fast-read; diff --git a/arch/riscv/configs/32-bit.config b/arch/riscv/configs/32-bit.config new file mode 100644 index 000000000000..43f41323b67e --- /dev/null +++ b/arch/riscv/configs/32-bit.config @@ -0,0 +1,2 @@ +CONFIG_ARCH_RV32I=y +CONFIG_32BIT=y diff --git a/arch/riscv/configs/64-bit.config b/arch/riscv/configs/64-bit.config new file mode 100644 index 000000000000..313edc554d84 --- /dev/null +++ b/arch/riscv/configs/64-bit.config @@ -0,0 +1,2 @@ +CONFIG_ARCH_RV64I=y +CONFIG_64BIT=y diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 4ebc80315f01..c252fd5706d2 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -72,9 +72,10 @@ CONFIG_GPIOLIB=y CONFIG_GPIO_SIFIVE=y # CONFIG_PTP_1588_CLOCK is not set CONFIG_POWER_RESET=y -CONFIG_DRM=y -CONFIG_DRM_RADEON=y -CONFIG_DRM_VIRTIO_GPU=y +CONFIG_DRM=m +CONFIG_DRM_RADEON=m +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_VIRTIO_GPU=m CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_USB=y CONFIG_USB_XHCI_HCD=y diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 109c97e991a6..b3e5ff0125fe 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -157,6 +157,8 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x); #define page_to_bus(page) (page_to_phys(page)) #define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr))) +#define sym_to_pfn(x) 
__phys_to_pfn(__pa_symbol(x)) + #ifdef CONFIG_FLATMEM #define pfn_valid(pfn) \ (((pfn) >= ARCH_PFN_OFFSET) && (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 39b550310ec6..bf204e7c1f74 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -75,7 +75,8 @@ #endif #ifdef CONFIG_XIP_KERNEL -#define XIP_OFFSET SZ_8M +#define XIP_OFFSET SZ_32M +#define XIP_OFFSET_MASK (SZ_32M - 1) #else #define XIP_OFFSET 0 #endif @@ -97,7 +98,8 @@ #ifdef CONFIG_XIP_KERNEL #define XIP_FIXUP(addr) ({ \ uintptr_t __a = (uintptr_t)(addr); \ - (__a >= CONFIG_XIP_PHYS_ADDR && __a < CONFIG_XIP_PHYS_ADDR + SZ_16M) ? \ + (__a >= CONFIG_XIP_PHYS_ADDR && \ + __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \ __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\ __a; \ }) diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 34fbb3ea21d5..7ac6a0e275f2 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -64,15 +64,6 @@ static inline void syscall_get_arguments(struct task_struct *task, memcpy(args, ®s->a1, 5 * sizeof(args[0])); } -static inline void syscall_set_arguments(struct task_struct *task, - struct pt_regs *regs, - const unsigned long *args) -{ - regs->orig_a0 = args[0]; - args++; - memcpy(®s->a1, args, 5 * sizeof(regs->a1)); -} - static inline int syscall_get_arch(struct task_struct *task) { #ifdef CONFIG_64BIT diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h index 208e31bc5d1c..bc6f75f3a199 100644 --- a/arch/riscv/include/asm/vdso.h +++ b/arch/riscv/include/asm/vdso.h @@ -8,30 +8,19 @@ #ifndef _ASM_RISCV_VDSO_H #define _ASM_RISCV_VDSO_H - -/* - * All systems with an MMU have a VDSO, but systems without an MMU don't - * support shared libraries and therefor don't have one. - */ -#ifdef CONFIG_MMU - -#include <linux/types.h> /* * All systems with an MMU have a VDSO, but systems without an MMU don't * support shared libraries and therefor don't have one. 
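To make the widened riscv XIP window concrete, here is a stand-alone model of the XIP_FIXUP() translation shown above. The base addresses are assumptions chosen for the example, not values from a real board.

#include <stdio.h>

#define DEMO_XIP_PHYS_ADDR      0x20000000UL    /* assumed flash (XIP) base */
#define DEMO_PHYS_RAM_BASE      0x80000000UL    /* assumed RAM base */
#define DEMO_XIP_OFFSET         (32UL << 20)    /* SZ_32M, as in the new code */

/* Mirror of XIP_FIXUP(): link-time addresses inside the 2 * XIP_OFFSET
 * flash window are redirected to their writable copy in RAM. */
static unsigned long demo_xip_fixup(unsigned long a)
{
        if (a >= DEMO_XIP_PHYS_ADDR &&
            a < DEMO_XIP_PHYS_ADDR + 2 * DEMO_XIP_OFFSET)
                return a - DEMO_XIP_PHYS_ADDR + DEMO_PHYS_RAM_BASE - DEMO_XIP_OFFSET;
        return a;
}

int main(void)
{
        unsigned long data_in_flash = DEMO_XIP_PHYS_ADDR + DEMO_XIP_OFFSET;

        /* The start of the data region maps to the start of RAM. */
        printf("%#lx -> %#lx\n", data_in_flash, demo_xip_fixup(data_in_flash));
        return 0;
}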
*/ #ifdef CONFIG_MMU -#define __VVAR_PAGES 1 +#define __VVAR_PAGES 2 #ifndef __ASSEMBLY__ #include <generated/vdso-offsets.h> #define VDSO_SYMBOL(base, name) \ (void __user *)((unsigned long)(base) + __vdso_##name##_offset) - -#endif /* CONFIG_MMU */ - #endif /* !__ASSEMBLY__ */ #endif /* CONFIG_MMU */ diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h index f839f16e0d2a..77d9c2f721c4 100644 --- a/arch/riscv/include/asm/vdso/gettimeofday.h +++ b/arch/riscv/include/asm/vdso/gettimeofday.h @@ -76,6 +76,13 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void) return _vdso_data; } +#ifdef CONFIG_TIME_NS +static __always_inline +const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd) +{ + return _timens_data; +} +#endif #endif /* !__ASSEMBLY__ */ #endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 25ec50573957..f52f01ecbeea 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -20,10 +20,20 @@ REG_L t0, _xip_fixup add \reg, \reg, t0 .endm +.macro XIP_FIXUP_FLASH_OFFSET reg + la t1, __data_loc + li t0, XIP_OFFSET_MASK + and t1, t1, t0 + li t1, XIP_OFFSET + sub t0, t0, t1 + sub \reg, \reg, t0 +.endm _xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET #else .macro XIP_FIXUP_OFFSET reg .endm +.macro XIP_FIXUP_FLASH_OFFSET reg +.endm #endif /* CONFIG_XIP_KERNEL */ __HEAD @@ -267,6 +277,7 @@ pmp_done: la a3, hart_lottery mv a2, a3 XIP_FIXUP_OFFSET a2 + XIP_FIXUP_FLASH_OFFSET a3 lw t1, (a3) amoswap.w t0, t1, (a2) /* first time here if hart_lottery in RAM is not set */ @@ -305,6 +316,7 @@ clear_bss_done: XIP_FIXUP_OFFSET sp #ifdef CONFIG_BUILTIN_DTB la a0, __dtb_start + XIP_FIXUP_OFFSET a0 #else mv a0, s1 #endif /* CONFIG_BUILTIN_DTB */ diff --git a/arch/riscv/kernel/reset.c b/arch/riscv/kernel/reset.c index ee5878d968cc..9c842c41684a 100644 --- a/arch/riscv/kernel/reset.c +++ b/arch/riscv/kernel/reset.c @@ -12,7 +12,7 @@ static void default_power_off(void) wait_for_interrupt(); } -void (*pm_power_off)(void) = default_power_off; +void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); void machine_restart(char *cmd) @@ -23,10 +23,16 @@ void machine_restart(char *cmd) void machine_halt(void) { - pm_power_off(); + if (pm_power_off != NULL) + pm_power_off(); + else + default_power_off(); } void machine_power_off(void) { - pm_power_off(); + if (pm_power_off != NULL) + pm_power_off(); + else + default_power_off(); } diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c index b70956d80408..a9436a65161a 100644 --- a/arch/riscv/kernel/vdso.c +++ b/arch/riscv/kernel/vdso.c @@ -13,6 +13,7 @@ #include <linux/err.h> #include <asm/page.h> #include <asm/vdso.h> +#include <linux/time_namespace.h> #ifdef CONFIG_GENERIC_TIME_VSYSCALL #include <vdso/datapage.h> @@ -25,14 +26,12 @@ extern char vdso_start[], vdso_end[]; enum vvar_pages { VVAR_DATA_PAGE_OFFSET, + VVAR_TIMENS_PAGE_OFFSET, VVAR_NR_PAGES, }; #define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT) -static unsigned int vdso_pages __ro_after_init; -static struct page **vdso_pagelist __ro_after_init; - /* * The vDSO data page. 
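As background for the riscv reset.c hunk above (pm_power_off now defaults to NULL): the usual pattern is that a power-controller driver installs the hook at probe time, and machine_power_off() falls back to the default wait-for-interrupt loop only when nothing registered. The driver below is purely hypothetical.

#include <linux/pm.h>   /* pm_power_off */

/* Hypothetical PMIC hook: whatever register write cuts board power. */
static void demo_pmic_power_off(void)
{
}

static int demo_pmic_probe(void)
{
        /* Install only if no other handler has claimed power-off already. */
        if (!pm_power_off)
                pm_power_off = demo_pmic_power_off;
        return 0;
}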
*/ @@ -42,83 +41,228 @@ static union { } vdso_data_store __page_aligned_data; struct vdso_data *vdso_data = &vdso_data_store.data; -static int __init vdso_init(void) +struct __vdso_info { + const char *name; + const char *vdso_code_start; + const char *vdso_code_end; + unsigned long vdso_pages; + /* Data Mapping */ + struct vm_special_mapping *dm; + /* Code Mapping */ + struct vm_special_mapping *cm; +}; + +static struct __vdso_info vdso_info __ro_after_init = { + .name = "vdso", + .vdso_code_start = vdso_start, + .vdso_code_end = vdso_end, +}; + +static int vdso_mremap(const struct vm_special_mapping *sm, + struct vm_area_struct *new_vma) +{ + current->mm->context.vdso = (void *)new_vma->vm_start; + + return 0; +} + +static int __init __vdso_init(void) { unsigned int i; + struct page **vdso_pagelist; + unsigned long pfn; - vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; - vdso_pagelist = - kcalloc(vdso_pages + VVAR_NR_PAGES, sizeof(struct page *), GFP_KERNEL); - if (unlikely(vdso_pagelist == NULL)) { - pr_err("vdso: pagelist allocation failed\n"); - return -ENOMEM; + if (memcmp(vdso_info.vdso_code_start, "\177ELF", 4)) { + pr_err("vDSO is not a valid ELF object!\n"); + return -EINVAL; } - for (i = 0; i < vdso_pages; i++) { - struct page *pg; + vdso_info.vdso_pages = ( + vdso_info.vdso_code_end - + vdso_info.vdso_code_start) >> + PAGE_SHIFT; + + vdso_pagelist = kcalloc(vdso_info.vdso_pages, + sizeof(struct page *), + GFP_KERNEL); + if (vdso_pagelist == NULL) + return -ENOMEM; + + /* Grab the vDSO code pages. */ + pfn = sym_to_pfn(vdso_info.vdso_code_start); + + for (i = 0; i < vdso_info.vdso_pages; i++) + vdso_pagelist[i] = pfn_to_page(pfn + i); + + vdso_info.cm->pages = vdso_pagelist; + + return 0; +} + +#ifdef CONFIG_TIME_NS +struct vdso_data *arch_get_vdso_data(void *vvar_page) +{ + return (struct vdso_data *)(vvar_page); +} + +/* + * The vvar mapping contains data for a specific time namespace, so when a task + * changes namespace we must unmap its vvar data for the old namespace. + * Subsequent faults will map in data for the new namespace. + * + * For more details see timens_setup_vdso_data(). + */ +int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) +{ + struct mm_struct *mm = task->mm; + struct vm_area_struct *vma; + + mmap_read_lock(mm); - pg = virt_to_page(vdso_start + (i << PAGE_SHIFT)); - vdso_pagelist[i] = pg; + for (vma = mm->mmap; vma; vma = vma->vm_next) { + unsigned long size = vma->vm_end - vma->vm_start; + + if (vma_is_special_mapping(vma, vdso_info.dm)) + zap_page_range(vma, vma->vm_start, size); } - vdso_pagelist[i] = virt_to_page(vdso_data); + mmap_read_unlock(mm); return 0; } + +static struct page *find_timens_vvar_page(struct vm_area_struct *vma) +{ + if (likely(vma->vm_mm == current->mm)) + return current->nsproxy->time_ns->vvar_page; + + /* + * VM_PFNMAP | VM_IO protect .fault() handler from being called + * through interfaces like /proc/$pid/mem or + * process_vm_{readv,writev}() as long as there's no .access() + * in special_mapping_vmops. 
+ * For more details check_vma_flags() and __access_remote_vm() + */ + WARN(1, "vvar_page accessed remotely"); + + return NULL; +} +#else +static struct page *find_timens_vvar_page(struct vm_area_struct *vma) +{ + return NULL; +} +#endif + +static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, + struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct page *timens_page = find_timens_vvar_page(vma); + unsigned long pfn; + + switch (vmf->pgoff) { + case VVAR_DATA_PAGE_OFFSET: + if (timens_page) + pfn = page_to_pfn(timens_page); + else + pfn = sym_to_pfn(vdso_data); + break; +#ifdef CONFIG_TIME_NS + case VVAR_TIMENS_PAGE_OFFSET: + /* + * If a task belongs to a time namespace then a namespace + * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and + * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET + * offset. + * See also the comment near timens_setup_vdso_data(). + */ + if (!timens_page) + return VM_FAULT_SIGBUS; + pfn = sym_to_pfn(vdso_data); + break; +#endif /* CONFIG_TIME_NS */ + default: + return VM_FAULT_SIGBUS; + } + + return vmf_insert_pfn(vma, vmf->address, pfn); +} + +enum rv_vdso_map { + RV_VDSO_MAP_VVAR, + RV_VDSO_MAP_VDSO, +}; + +static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = { + [RV_VDSO_MAP_VVAR] = { + .name = "[vvar]", + .fault = vvar_fault, + }, + [RV_VDSO_MAP_VDSO] = { + .name = "[vdso]", + .mremap = vdso_mremap, + }, +}; + +static int __init vdso_init(void) +{ + vdso_info.dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR]; + vdso_info.cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO]; + + return __vdso_init(); +} arch_initcall(vdso_init); -int arch_setup_additional_pages(struct linux_binprm *bprm, - int uses_interp) +static int __setup_additional_pages(struct mm_struct *mm, + struct linux_binprm *bprm, + int uses_interp) { - struct mm_struct *mm = current->mm; - unsigned long vdso_base, vdso_len; - int ret; + unsigned long vdso_base, vdso_text_len, vdso_mapping_len; + void *ret; BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES); - vdso_len = (vdso_pages + VVAR_NR_PAGES) << PAGE_SHIFT; + vdso_text_len = vdso_info.vdso_pages << PAGE_SHIFT; + /* Be sure to map the data page */ + vdso_mapping_len = vdso_text_len + VVAR_SIZE; - if (mmap_write_lock_killable(mm)) - return -EINTR; - - vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0); + vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); if (IS_ERR_VALUE(vdso_base)) { - ret = vdso_base; - goto end; + ret = ERR_PTR(vdso_base); + goto up_fail; } - mm->context.vdso = NULL; - ret = install_special_mapping(mm, vdso_base, VVAR_SIZE, - (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]); - if (unlikely(ret)) - goto end; + ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE, + (VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info.dm); + if (IS_ERR(ret)) + goto up_fail; + vdso_base += VVAR_SIZE; + mm->context.vdso = (void *)vdso_base; ret = - install_special_mapping(mm, vdso_base + VVAR_SIZE, - vdso_pages << PAGE_SHIFT, + _install_special_mapping(mm, vdso_base, vdso_text_len, (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC), - vdso_pagelist); + vdso_info.cm); - if (unlikely(ret)) - goto end; + if (IS_ERR(ret)) + goto up_fail; - /* - * Put vDSO base into mm struct. We need to do this before calling - * install_special_mapping or the perf counter mmap tracking code - * will fail to recognise it as a vDSO (since arch_vma_name fails). 
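The vvar fault handler above is what keeps the userspace fast path intact for tasks inside a time namespace: the namespace-specific data page is substituted at the offset the vDSO already reads, so an ordinary call like the one below still avoids the syscall. Illustrative userspace snippet only, not part of the kernel change.

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        /* Resolved through the vDSO; inside a time namespace the result is
         * shifted by the offsets stored in the timens data page. */
        if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
                return 1;
        printf("monotonic: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
}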
- */ - mm->context.vdso = (void *)vdso_base + VVAR_SIZE; + return 0; -end: - mmap_write_unlock(mm); - return ret; +up_fail: + mm->context.vdso = NULL; + return PTR_ERR(ret); } -const char *arch_vma_name(struct vm_area_struct *vma) +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { - if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso)) - return "[vdso]"; - if (vma->vm_mm && (vma->vm_start == - (long)vma->vm_mm->context.vdso - VVAR_SIZE)) - return "[vdso_data]"; - return NULL; + struct mm_struct *mm = current->mm; + int ret; + + if (mmap_write_lock_killable(mm)) + return -EINTR; + + ret = __setup_additional_pages(mm, bprm, uses_interp); + mmap_write_unlock(mm); + + return ret; } diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S index e9111f700af0..01d94aae5bf5 100644 --- a/arch/riscv/kernel/vdso/vdso.lds.S +++ b/arch/riscv/kernel/vdso/vdso.lds.S @@ -10,6 +10,9 @@ OUTPUT_ARCH(riscv) SECTIONS { PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE); +#ifdef CONFIG_TIME_NS + PROVIDE(_timens_data = _vdso_data + PAGE_SIZE); +#endif . = SIZEOF_HEADERS; .hash : { *(.hash) } :text diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S index 9c9f35091ef0..f5ed08262139 100644 --- a/arch/riscv/kernel/vmlinux-xip.lds.S +++ b/arch/riscv/kernel/vmlinux-xip.lds.S @@ -64,8 +64,11 @@ SECTIONS /* * From this point, stuff is considered writable and will be copied to RAM */ - __data_loc = ALIGN(16); /* location in file */ - . = LOAD_OFFSET + XIP_OFFSET; /* location in memory */ + __data_loc = ALIGN(PAGE_SIZE); /* location in file */ + . = KERNEL_LINK_ADDR + XIP_OFFSET; /* location in memory */ + +#undef LOAD_OFFSET +#define LOAD_OFFSET (KERNEL_LINK_ADDR + XIP_OFFSET - (__data_loc & XIP_OFFSET_MASK)) _sdata = .; /* Start of data section */ _data = .; @@ -96,7 +99,6 @@ SECTIONS KEEP(*(__soc_builtin_dtb_table)) __soc_builtin_dtb_table_end = .; } - PERCPU_SECTION(L1_CACHE_BYTES) . 
= ALIGN(8); .alternative : { @@ -122,6 +124,8 @@ SECTIONS BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) + PERCPU_SECTION(L1_CACHE_BYTES) + .rel.dyn : AT(ADDR(.rel.dyn) - LOAD_OFFSET) { *(.rel.dyn*) } diff --git a/arch/riscv/lib/delay.c b/arch/riscv/lib/delay.c index f51c9a03bca1..49d510ba75fd 100644 --- a/arch/riscv/lib/delay.c +++ b/arch/riscv/lib/delay.c @@ -4,10 +4,14 @@ */ #include <linux/delay.h> +#include <linux/math.h> #include <linux/param.h> #include <linux/timex.h> +#include <linux/types.h> #include <linux/export.h> +#include <asm/processor.h> + /* * This is copies from arch/arm/include/asm/delay.h * diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c index ee3459cb6750..ea54cc0c9106 100644 --- a/arch/riscv/mm/context.c +++ b/arch/riscv/mm/context.c @@ -233,8 +233,10 @@ static int __init asids_init(void) local_flush_tlb_all(); /* Pre-compute ASID details */ - num_asids = 1 << asid_bits; - asid_mask = num_asids - 1; + if (asid_bits) { + num_asids = 1 << asid_bits; + asid_mask = num_asids - 1; + } /* * Use ASID allocator only if number of HW ASIDs are @@ -255,7 +257,7 @@ static int __init asids_init(void) pr_info("ASID allocator using %lu bits (%lu entries)\n", asid_bits, num_asids); } else { - pr_info("ASID allocator disabled\n"); + pr_info("ASID allocator disabled (%lu bits)\n", asid_bits); } return 0; diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c index 18bf338303b6..ddb7d3b99e89 100644 --- a/arch/riscv/mm/extable.c +++ b/arch/riscv/mm/extable.c @@ -11,7 +11,7 @@ #include <linux/module.h> #include <linux/uaccess.h> -#ifdef CONFIG_BPF_JIT +#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I) int rv_bpf_fixup_exception(const struct exception_table_entry *ex, struct pt_regs *regs); #endif @@ -23,7 +23,7 @@ int fixup_exception(struct pt_regs *regs) if (!fixup) return 0; -#ifdef CONFIG_BPF_JIT +#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I) if (regs->epc >= BPF_JIT_REGION_START && regs->epc < BPF_JIT_REGION_END) return rv_bpf_fixup_exception(fixup, regs); #endif diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index c0cddf0fc22d..24b2b8044602 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -41,7 +41,7 @@ phys_addr_t phys_ram_base __ro_after_init; EXPORT_SYMBOL(phys_ram_base); #ifdef CONFIG_XIP_KERNEL -extern char _xiprom[], _exiprom[]; +extern char _xiprom[], _exiprom[], __data_loc; #endif unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] @@ -454,10 +454,9 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size) /* called from head.S with MMU off */ asmlinkage void __init __copy_data(void) { - void *from = (void *)(&_sdata); - void *end = (void *)(&_end); + void *from = (void *)(&__data_loc); void *to = (void *)CONFIG_PHYS_RAM_BASE; - size_t sz = (size_t)(end - from + 1); + size_t sz = (size_t)((uintptr_t)(&_end) - (uintptr_t)(&_sdata)); memcpy(to, from, sz); } diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 2ca345c7b0bf..f2a779c7e225 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -460,6 +460,8 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx) #define BPF_FIXUP_REG_MASK GENMASK(31, 27) int rv_bpf_fixup_exception(const struct exception_table_entry *ex, + struct pt_regs *regs); +int rv_bpf_fixup_exception(const struct exception_table_entry *ex, struct pt_regs *regs) { off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup); diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild index 
8b98c501142d..76e362277179 100644 --- a/arch/s390/Kbuild +++ b/arch/s390/Kbuild @@ -8,3 +8,6 @@ obj-$(CONFIG_APPLDATA_BASE) += appldata/ obj-y += net/ obj-$(CONFIG_PCI) += pci/ obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += purgatory/ + +# for cleaning +subdir- += boot tools diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 450b351dfa8e..69c45f600273 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -3,9 +3,7 @@ # s390/Makefile # # This file is included by the global makefile so that you can add your own -# architecture-specific flags and dependencies. Remember to do have actions -# for "archclean" and "archdep" for cleaning up and making dependencies for -# this architecture +# architecture-specific flags and dependencies. # # Copyright (C) 1994 by Linus Torvalds # @@ -147,10 +145,6 @@ zfcpdump: vdso_install: $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@ -archclean: - $(Q)$(MAKE) $(clean)=$(boot) - $(Q)$(MAKE) $(clean)=$(tools) - archheaders: $(Q)$(MAKE) $(build)=$(syscalls) uapi diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h index e3aa354ab9f4..94b6919026df 100644 --- a/arch/s390/include/asm/facility.h +++ b/arch/s390/include/asm/facility.h @@ -9,8 +9,12 @@ #define __ASM_FACILITY_H #include <asm/facility-defs.h> + +#include <linux/minmax.h> #include <linux/string.h> +#include <linux/types.h> #include <linux/preempt.h> + #include <asm/lowcore.h> #define MAX_FACILITY_BIT (sizeof(stfle_fac_list) * 8) diff --git a/arch/s390/include/asm/kdebug.h b/arch/s390/include/asm/kdebug.h index d5327f064799..4377238e4752 100644 --- a/arch/s390/include/asm/kdebug.h +++ b/arch/s390/include/asm/kdebug.h @@ -23,6 +23,6 @@ enum die_val { DIE_NMI_IPI, }; -extern void die(struct pt_regs *, const char *); +extern void __noreturn die(struct pt_regs *, const char *); #endif diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index b3dd883699e7..27e3d804b311 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -78,18 +78,6 @@ static inline void syscall_get_arguments(struct task_struct *task, args[0] = regs->orig_gpr2 & mask; } -static inline void syscall_set_arguments(struct task_struct *task, - struct pt_regs *regs, - const unsigned long *args) -{ - unsigned int n = 6; - - while (n-- > 0) - if (n > 0) - regs->gprs[2 + n] = args[n]; - regs->orig_gpr2 = args[0]; -} - static inline int syscall_get_arch(struct task_struct *task) { #ifdef CONFIG_COMPAT diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index 85f326e258df..0681c55e831d 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -192,7 +192,7 @@ void show_regs(struct pt_regs *regs) static DEFINE_SPINLOCK(die_lock); -void die(struct pt_regs *regs, const char *str) +void __noreturn die(struct pt_regs *regs, const char *str) { static int die_counter; diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 6c6f7dcce1a5..035705c9f23e 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -84,7 +84,7 @@ static void default_trap_handler(struct pt_regs *regs) { if (user_mode(regs)) { report_user_fault(regs, SIGSEGV, 0); - do_exit(SIGSEGV); + force_fatal_sig(SIGSEGV); } else die(regs, "Unknown program exception"); } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 212632d57db9..d30f5986fa85 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -260,7 +260,6 @@ static noinline void do_no_context(struct pt_regs *regs) " in virtual user address space\n"); 
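The s390 hunks here (and the matching sh one further down) mark die() as __noreturn so that the do_exit(SIGKILL) calls that used to follow it can be dropped as dead code. A reduced sketch of the idea, with hypothetical demo_* names:

#include <linux/compiler.h>     /* __noreturn */
#include <linux/kernel.h>       /* panic() */

static void __noreturn demo_die(const char *msg)
{
        panic("%s", msg);       /* never returns */
}

static void demo_no_context(void)
{
        demo_die("Oops");
        /*
         * Nothing is needed here any more: with the __noreturn annotation
         * the compiler (and objtool) know this point is unreachable, so a
         * trailing do_exit(SIGKILL) would just be dead code.
         */
}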
dump_fault_info(regs); die(regs, "Oops"); - do_exit(SIGKILL); } static noinline void do_low_address(struct pt_regs *regs) @@ -270,7 +269,6 @@ static noinline void do_low_address(struct pt_regs *regs) if (regs->psw.mask & PSW_MASK_PSTATE) { /* Low-address protection hit in user mode 'cannot happen'. */ die (regs, "Low-address protection"); - do_exit(SIGKILL); } do_no_context(regs); diff --git a/arch/sh/Kbuild b/arch/sh/Kbuild index 48c2a091a072..be171880977e 100644 --- a/arch/sh/Kbuild +++ b/arch/sh/Kbuild @@ -2,3 +2,6 @@ obj-y += kernel/ mm/ boards/ obj-$(CONFIG_SH_FPU_EMU) += math-emu/ obj-$(CONFIG_USE_BUILTIN_DTB) += boot/dts/ + +# for cleaning +subdir- += boot diff --git a/arch/sh/Makefile b/arch/sh/Makefile index 88ddb6f1c75b..b39412bf91fb 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -198,10 +198,6 @@ compressed: zImage archprepare: $(Q)$(MAKE) $(build)=arch/sh/tools include/generated/machtypes.h -archclean: - $(Q)$(MAKE) $(clean)=$(boot) - $(Q)$(MAKE) $(clean)=arch/sh/kernel/vsyscall - archheaders: $(Q)$(MAKE) $(build)=arch/sh/kernel/syscalls all diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h index cb51a7528384..d87738eebe30 100644 --- a/arch/sh/include/asm/syscall_32.h +++ b/arch/sh/include/asm/syscall_32.h @@ -57,18 +57,6 @@ static inline void syscall_get_arguments(struct task_struct *task, args[0] = regs->regs[4]; } -static inline void syscall_set_arguments(struct task_struct *task, - struct pt_regs *regs, - const unsigned long *args) -{ - regs->regs[1] = args[5]; - regs->regs[0] = args[4]; - regs->regs[7] = args[3]; - regs->regs[6] = args[2]; - regs->regs[5] = args[1]; - regs->regs[4] = args[0]; -} - static inline int syscall_get_arch(struct task_struct *task) { int arch = AUDIT_ARCH_SH; diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c index ae354a2931e7..fd6db0ab1928 100644 --- a/arch/sh/kernel/cpu/fpu.c +++ b/arch/sh/kernel/cpu/fpu.c @@ -62,18 +62,20 @@ void fpu_state_restore(struct pt_regs *regs) } if (!tsk_used_math(tsk)) { - local_irq_enable(); + int ret; /* * does a slab alloc which can sleep */ - if (init_fpu(tsk)) { + local_irq_enable(); + ret = init_fpu(tsk); + local_irq_disable(); + if (ret) { /* * ran out of memory! 
*/ - do_group_exit(SIGKILL); + force_sig(SIGKILL); return; } - local_irq_disable(); } grab_fpu(regs); diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index e76b22157099..cbe3201d4f21 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -20,7 +20,7 @@ static DEFINE_SPINLOCK(die_lock); -void die(const char *str, struct pt_regs *regs, long err) +void __noreturn die(const char *str, struct pt_regs *regs, long err) { static int die_counter; diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 88a1f453d73e..1e1aa75df3ca 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -238,8 +238,6 @@ no_context(struct pt_regs *regs, unsigned long error_code, show_fault_oops(regs, address); die("Oops", regs, error_code); - bust_spinlocks(0); - do_exit(SIGKILL); } static void diff --git a/arch/sparc/Kbuild b/arch/sparc/Kbuild index c9e574906a9b..71cb3d934bf6 100644 --- a/arch/sparc/Kbuild +++ b/arch/sparc/Kbuild @@ -9,3 +9,6 @@ obj-y += math-emu/ obj-y += net/ obj-y += crypto/ obj-$(CONFIG_SPARC64) += vdso/ + +# for cleaning +subdir- += boot diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index b120ed947f50..66fc08646be5 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -53,8 +53,9 @@ config SPARC32 def_bool !64BIT select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_CPU - select GENERIC_ATOMIC64 select CLZ_TAB + select DMA_DIRECT_REMAP + select GENERIC_ATOMIC64 select HAVE_UID16 select OLD_SIGACTION select ZONE_DMA diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile index 24fb5a99f439..c7008bbebc4c 100644 --- a/arch/sparc/Makefile +++ b/arch/sparc/Makefile @@ -75,9 +75,6 @@ install: sh $(srctree)/$(boot)/install.sh $(KERNELRELEASE) $(KBUILD_IMAGE) \ System.map "$(INSTALL_PATH)" -archclean: - $(Q)$(MAKE) $(clean)=$(boot) - archheaders: $(Q)$(MAKE) $(build)=arch/sparc/kernel/syscalls all diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile index 849236d4eca4..45e5c76d449e 100644 --- a/arch/sparc/boot/Makefile +++ b/arch/sparc/boot/Makefile @@ -22,7 +22,7 @@ ifeq ($(CONFIG_SPARC64),y) # Actual linking -$(obj)/zImage: $(obj)/image +$(obj)/zImage: $(obj)/image FORCE $(call if_changed,gzip) @echo ' kernel: $@ is ready' @@ -31,7 +31,7 @@ $(obj)/vmlinux.aout: vmlinux FORCE @echo ' kernel: $@ is ready' else -$(obj)/zImage: $(obj)/image +$(obj)/zImage: $(obj)/image FORCE $(call if_changed,strip) @echo ' kernel: $@ is ready' @@ -44,7 +44,7 @@ OBJCOPYFLAGS_image.bin := -S -O binary -R .note -R .comment $(obj)/image.bin: $(obj)/image FORCE $(call if_changed,objcopy) -$(obj)/image.gz: $(obj)/image.bin +$(obj)/image.gz: $(obj)/image.bin FORCE $(call if_changed,gzip) UIMAGE_LOADADDR = $(CONFIG_UBOOT_LOAD_ADDR) @@ -56,7 +56,7 @@ quiet_cmd_uimage.o = UIMAGE.O $@ -r -b binary $@ -o $@.o targets += uImage -$(obj)/uImage: $(obj)/image.gz +$(obj)/uImage: $(obj)/image.gz FORCE $(call if_changed,uimage) $(call if_changed,uimage.o) @echo ' Image $@ is ready' diff --git a/arch/sparc/include/asm/syscall.h b/arch/sparc/include/asm/syscall.h index 62a5a78804c4..20c109ac8cc9 100644 --- a/arch/sparc/include/asm/syscall.h +++ b/arch/sparc/include/asm/syscall.h @@ -117,16 +117,6 @@ static inline void syscall_get_arguments(struct task_struct *task, } } -static inline void syscall_set_arguments(struct task_struct *task, - struct pt_regs *regs, - const unsigned long *args) -{ - unsigned int i; - - for (i = 0; i < 6; i++) - regs->u_regs[UREG_I0 + i] = args[i]; -} - static inline int syscall_get_arch(struct task_struct *task) { #if defined(CONFIG_SPARC64) && 
defined(CONFIG_COMPAT) diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 7ceae24b0ca9..57a72c46eddb 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -52,17 +52,6 @@ #include <asm/io-unit.h> #include <asm/leon.h> -/* This function must make sure that caches and memory are coherent after DMA - * On LEON systems without cache snooping it flushes the entire D-CACHE. - */ -static inline void dma_make_coherent(unsigned long pa, unsigned long len) -{ - if (sparc_cpu_model == sparc_leon) { - if (!sparc_leon3_snooping_enabled()) - leon_flush_dcache_all(); - } -} - static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz); static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys, unsigned long size, char *name); @@ -311,68 +300,19 @@ arch_initcall(sparc_register_ioport); #endif /* CONFIG_SBUS */ - -/* Allocate and map kernel buffer using consistent mode DMA for a device. - * hwdev should be valid struct pci_dev pointer for PCI devices. - */ -void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, unsigned long attrs) -{ - unsigned long addr; - void *va; - - if (!size || size > 256 * 1024) /* __get_free_pages() limit */ - return NULL; - - size = PAGE_ALIGN(size); - va = (void *) __get_free_pages(gfp | __GFP_ZERO, get_order(size)); - if (!va) { - printk("%s: no %zd pages\n", __func__, size >> PAGE_SHIFT); - return NULL; - } - - addr = sparc_dma_alloc_resource(dev, size); - if (!addr) - goto err_nomem; - - srmmu_mapiorange(0, virt_to_phys(va), addr, size); - - *dma_handle = virt_to_phys(va); - return (void *)addr; - -err_nomem: - free_pages((unsigned long)va, get_order(size)); - return NULL; -} - -/* Free and unmap a consistent DMA buffer. - * cpu_addr is what was returned arch_dma_alloc, size must be the same as what - * was passed into arch_dma_alloc, and likewise dma_addr must be the same as - * what *dma_ndler was set to. +/* + * IIep is write-through, not flushing on cpu to device transfer. * - * References to the memory and mappings associated with cpu_addr/dma_addr - * past this call are illegal. + * On LEON systems without cache snooping, the entire D-CACHE must be flushed to + * make DMA to cacheable memory coherent. */ -void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_addr, unsigned long attrs) -{ - size = PAGE_ALIGN(size); - - if (!sparc_dma_free_resource(cpu_addr, size)) - return; - - dma_make_coherent(dma_addr, size); - srmmu_unmapiorange((unsigned long)cpu_addr, size); - free_pages((unsigned long)phys_to_virt(dma_addr), get_order(size)); -} - -/* IIep is write-through, not flushing on cpu to device transfer. 
*/ - void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, enum dma_data_direction dir) { - if (dir != PCI_DMA_TODEVICE) - dma_make_coherent(paddr, PAGE_ALIGN(size)); + if (dir != PCI_DMA_TODEVICE && + sparc_cpu_model == sparc_leon && + !sparc_leon3_snooping_enabled()) + leon_flush_dcache_all(); } #ifdef CONFIG_PROC_FS diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 02f3ad55dfe3..cd677bc564a7 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -244,7 +244,7 @@ static int setup_frame(struct ksignal *ksig, struct pt_regs *regs, get_sigframe(ksig, regs, sigframe_size); if (invalid_frame_pointer(sf, sigframe_size)) { - do_exit(SIGILL); + force_fatal_sig(SIGILL); return -EINVAL; } @@ -336,7 +336,7 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs, sf = (struct rt_signal_frame __user *) get_sigframe(ksig, regs, sigframe_size); if (invalid_frame_pointer(sf, sigframe_size)) { - do_exit(SIGILL); + force_fatal_sig(SIGILL); return -EINVAL; } diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c index 69a6ba6e9293..bbbd40cc6b28 100644 --- a/arch/sparc/kernel/windows.c +++ b/arch/sparc/kernel/windows.c @@ -121,8 +121,10 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who) if ((sp & 7) || copy_to_user((char __user *) sp, &tp->reg_window[window], - sizeof(struct reg_window32))) - do_exit(SIGILL); + sizeof(struct reg_window32))) { + force_fatal_sig(SIGILL); + return; + } } tp->w_saved = 0; } diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index fa858626b85b..90dc4ae315c8 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -248,7 +248,6 @@ no_context: } unhandled_fault(address, tsk, regs); - do_exit(SIGKILL); /* * We ran out of memory, or some other thing happened to us that made diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 0dce4b7ff73e..912205787161 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -266,7 +266,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign default: printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n", current->comm, current->pid, tsb_bytes); - do_exit(SIGSEGV); + BUG(); } tte |= pte_sz_bits(page_sz); diff --git a/arch/um/include/asm/syscall-generic.h b/arch/um/include/asm/syscall-generic.h index 2984feb9d576..172b74143c4b 100644 --- a/arch/um/include/asm/syscall-generic.h +++ b/arch/um/include/asm/syscall-generic.h @@ -62,20 +62,6 @@ static inline void syscall_get_arguments(struct task_struct *task, *args = UPT_SYSCALL_ARG6(r); } -static inline void syscall_set_arguments(struct task_struct *task, - struct pt_regs *regs, - const unsigned long *args) -{ - struct uml_pt_regs *r = ®s->regs; - - UPT_SYSCALL_ARG1(r) = *args++; - UPT_SYSCALL_ARG2(r) = *args++; - UPT_SYSCALL_ARG3(r) = *args++; - UPT_SYSCALL_ARG4(r) = *args++; - UPT_SYSCALL_ARG5(r) = *args++; - UPT_SYSCALL_ARG6(r) = *args; -} - /* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. 
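With DMA_DIRECT_REMAP selected in the sparc Kconfig above, the custom arch_dma_alloc()/arch_dma_free() pair removed from ioport.c is no longer needed: drivers keep calling the generic coherent API and the common dma-direct code provides the uncached remap. A consumer-side sketch with hypothetical names:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* A driver-side allocation that used to reach the removed sparc32-specific
 * arch_dma_alloc(); it now goes through the generic dma-direct path. */
static void *demo_alloc_ring(struct device *dev, size_t size,
                             dma_addr_t *dma_handle)
{
        return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}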
*/ #endif /* __UM_SYSCALL_GENERIC_H */ diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 3198c4767387..c32efb09db21 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -158,7 +158,7 @@ static void bad_segv(struct faultinfo fi, unsigned long ip) void fatal_sigsegv(void) { - force_sigsegv(SIGSEGV); + force_fatal_sig(SIGSEGV); do_signal(¤t->thread.regs); /* * This is to tell gcc that we're not returning - do_signal diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild index 30dec019756b..f384cb1a4f7a 100644 --- a/arch/x86/Kbuild +++ b/arch/x86/Kbuild @@ -25,3 +25,6 @@ obj-y += platform/ obj-y += net/ obj-$(CONFIG_KEXEC_FILE) += purgatory/ + +# for cleaning +subdir- += boot tools diff --git a/arch/x86/Makefile b/arch/x86/Makefile index aab70413ae7a..42243869216d 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -283,8 +283,6 @@ endif archclean: $(Q)rm -rf $(objtree)/arch/i386 $(Q)rm -rf $(objtree)/arch/x86_64 - $(Q)$(MAKE) $(clean)=$(boot) - $(Q)$(MAKE) $(clean)=arch/x86/tools define archhelp echo '* bzImage - Compressed kernel image (arch/x86/boot/bzImage)' diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 1b40b9297083..0b6b277ee050 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -226,7 +226,8 @@ bool emulate_vsyscall(unsigned long error_code, if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) { warn_bad_vsyscall(KERN_DEBUG, regs, "seccomp tried to change syscall nr or ip"); - do_exit(SIGSYS); + force_fatal_sig(SIGSYS); + return true; } regs->orig_ax = -1; if (tmp) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index d9d6b0203ec4..fc1151e77569 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -577,7 +577,9 @@ void paravirt_leave_lazy_mmu(void); void paravirt_flush_lazy_mmu(void); void _paravirt_nop(void); +void paravirt_BUG(void); u64 _paravirt_ident_64(u64); +unsigned long paravirt_ret0(void); #define paravirt_nop ((void *)_paravirt_nop) diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index f7e2d82d24fb..5b85987a5e97 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -87,15 +87,6 @@ static inline void syscall_get_arguments(struct task_struct *task, memcpy(args, ®s->bx, 6 * sizeof(args[0])); } -static inline void syscall_set_arguments(struct task_struct *task, - struct pt_regs *regs, - unsigned int i, unsigned int n, - const unsigned long *args) -{ - BUG_ON(i + n > 6); - memcpy(®s->bx + i, args, n * sizeof(args[0])); -} - static inline int syscall_get_arch(struct task_struct *task) { return AUDIT_ARCH_I386; @@ -127,30 +118,6 @@ static inline void syscall_get_arguments(struct task_struct *task, } } -static inline void syscall_set_arguments(struct task_struct *task, - struct pt_regs *regs, - const unsigned long *args) -{ -# ifdef CONFIG_IA32_EMULATION - if (task->thread_info.status & TS_COMPAT) { - regs->bx = *args++; - regs->cx = *args++; - regs->dx = *args++; - regs->si = *args++; - regs->di = *args++; - regs->bp = *args; - } else -# endif - { - regs->di = *args++; - regs->si = *args++; - regs->dx = *args++; - regs->r10 = *args++; - regs->r8 = *args++; - regs->r9 = *args; - } -} - static inline int syscall_get_arch(struct task_struct *task) { /* x32 tasks should be considered AUDIT_ARCH_X86_64. 
*/ diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 4a7ff8b0db20..0575f5863b7f 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -248,6 +248,7 @@ privcmd_call(unsigned int call, return res; } +#ifdef CONFIG_XEN_PV static inline int HYPERVISOR_set_trap_table(struct trap_info *table) { @@ -281,6 +282,107 @@ HYPERVISOR_callback_op(int cmd, void *arg) } static inline int +HYPERVISOR_set_debugreg(int reg, unsigned long value) +{ + return _hypercall2(int, set_debugreg, reg, value); +} + +static inline unsigned long +HYPERVISOR_get_debugreg(int reg) +{ + return _hypercall1(unsigned long, get_debugreg, reg); +} + +static inline int +HYPERVISOR_update_descriptor(u64 ma, u64 desc) +{ + return _hypercall2(int, update_descriptor, ma, desc); +} + +static inline int +HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val, + unsigned long flags) +{ + return _hypercall3(int, update_va_mapping, va, new_val.pte, flags); +} + +static inline int +HYPERVISOR_set_segment_base(int reg, unsigned long value) +{ + return _hypercall2(int, set_segment_base, reg, value); +} + +static inline void +MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set) +{ + mcl->op = __HYPERVISOR_fpu_taskswitch; + mcl->args[0] = set; + + trace_xen_mc_entry(mcl, 1); +} + +static inline void +MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, + pte_t new_val, unsigned long flags) +{ + mcl->op = __HYPERVISOR_update_va_mapping; + mcl->args[0] = va; + mcl->args[1] = new_val.pte; + mcl->args[2] = flags; + + trace_xen_mc_entry(mcl, 3); +} + +static inline void +MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr, + struct desc_struct desc) +{ + mcl->op = __HYPERVISOR_update_descriptor; + mcl->args[0] = maddr; + mcl->args[1] = *(unsigned long *)&desc; + + trace_xen_mc_entry(mcl, 2); +} + +static inline void +MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, + int count, int *success_count, domid_t domid) +{ + mcl->op = __HYPERVISOR_mmu_update; + mcl->args[0] = (unsigned long)req; + mcl->args[1] = count; + mcl->args[2] = (unsigned long)success_count; + mcl->args[3] = domid; + + trace_xen_mc_entry(mcl, 4); +} + +static inline void +MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count, + int *success_count, domid_t domid) +{ + mcl->op = __HYPERVISOR_mmuext_op; + mcl->args[0] = (unsigned long)op; + mcl->args[1] = count; + mcl->args[2] = (unsigned long)success_count; + mcl->args[3] = domid; + + trace_xen_mc_entry(mcl, 4); +} + +static inline void +MULTI_stack_switch(struct multicall_entry *mcl, + unsigned long ss, unsigned long esp) +{ + mcl->op = __HYPERVISOR_stack_switch; + mcl->args[0] = ss; + mcl->args[1] = esp; + + trace_xen_mc_entry(mcl, 2); +} +#endif + +static inline int HYPERVISOR_sched_op(int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); @@ -308,26 +410,6 @@ HYPERVISOR_platform_op(struct xen_platform_op *op) return _hypercall1(int, platform_op, op); } -static __always_inline int -HYPERVISOR_set_debugreg(int reg, unsigned long value) -{ - return _hypercall2(int, set_debugreg, reg, value); -} - -static __always_inline unsigned long -HYPERVISOR_get_debugreg(int reg) -{ - return _hypercall1(unsigned long, get_debugreg, reg); -} - -static inline int -HYPERVISOR_update_descriptor(u64 ma, u64 desc) -{ - if (sizeof(u64) == sizeof(long)) - return _hypercall2(int, update_descriptor, ma, desc); - return _hypercall4(int, update_descriptor, ma, ma>>32, desc, 
desc>>32); -} - static inline long HYPERVISOR_memory_op(unsigned int cmd, void *arg) { @@ -341,18 +423,6 @@ HYPERVISOR_multicall(void *call_list, uint32_t nr_calls) } static inline int -HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val, - unsigned long flags) -{ - if (sizeof(new_val) == sizeof(long)) - return _hypercall3(int, update_va_mapping, va, - new_val.pte, flags); - else - return _hypercall4(int, update_va_mapping, va, - new_val.pte, new_val.pte >> 32, flags); -} - -static inline int HYPERVISOR_event_channel_op(int cmd, void *arg) { return _hypercall2(int, event_channel_op, cmd, arg); @@ -394,14 +464,6 @@ HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args) return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } -#ifdef CONFIG_X86_64 -static inline int -HYPERVISOR_set_segment_base(int reg, unsigned long value) -{ - return _hypercall2(int, set_segment_base, reg, value); -} -#endif - static inline int HYPERVISOR_suspend(unsigned long start_info_mfn) { @@ -423,13 +485,6 @@ HYPERVISOR_hvm_op(int op, void *arg) } static inline int -HYPERVISOR_tmem_op( - struct tmem_op *op) -{ - return _hypercall1(int, tmem_op, op); -} - -static inline int HYPERVISOR_xenpmu_op(unsigned int op, void *arg) { return _hypercall2(int, xenpmu_op, op, arg); @@ -446,88 +501,4 @@ HYPERVISOR_dm_op( return ret; } -static inline void -MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set) -{ - mcl->op = __HYPERVISOR_fpu_taskswitch; - mcl->args[0] = set; - - trace_xen_mc_entry(mcl, 1); -} - -static inline void -MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, - pte_t new_val, unsigned long flags) -{ - mcl->op = __HYPERVISOR_update_va_mapping; - mcl->args[0] = va; - if (sizeof(new_val) == sizeof(long)) { - mcl->args[1] = new_val.pte; - mcl->args[2] = flags; - } else { - mcl->args[1] = new_val.pte; - mcl->args[2] = new_val.pte >> 32; - mcl->args[3] = flags; - } - - trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 3 : 4); -} - -static inline void -MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr, - struct desc_struct desc) -{ - mcl->op = __HYPERVISOR_update_descriptor; - if (sizeof(maddr) == sizeof(long)) { - mcl->args[0] = maddr; - mcl->args[1] = *(unsigned long *)&desc; - } else { - u32 *p = (u32 *)&desc; - - mcl->args[0] = maddr; - mcl->args[1] = maddr >> 32; - mcl->args[2] = *p++; - mcl->args[3] = *p; - } - - trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 
2 : 4); -} - -static inline void -MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, - int count, int *success_count, domid_t domid) -{ - mcl->op = __HYPERVISOR_mmu_update; - mcl->args[0] = (unsigned long)req; - mcl->args[1] = count; - mcl->args[2] = (unsigned long)success_count; - mcl->args[3] = domid; - - trace_xen_mc_entry(mcl, 4); -} - -static inline void -MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count, - int *success_count, domid_t domid) -{ - mcl->op = __HYPERVISOR_mmuext_op; - mcl->args[0] = (unsigned long)op; - mcl->args[1] = count; - mcl->args[2] = (unsigned long)success_count; - mcl->args[3] = domid; - - trace_xen_mc_entry(mcl, 4); -} - -static inline void -MULTI_stack_switch(struct multicall_entry *mcl, - unsigned long ss, unsigned long esp) -{ - mcl->op = __HYPERVISOR_stack_switch; - mcl->args[0] = ss; - mcl->args[1] = esp; - - trace_xen_mc_entry(mcl, 2); -} - #endif /* _ASM_X86_XEN_HYPERCALL_H */ diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h index ff4b52e37e60..4957f59deb40 100644 --- a/arch/x86/include/asm/xen/hypervisor.h +++ b/arch/x86/include/asm/xen/hypervisor.h @@ -62,4 +62,8 @@ void xen_arch_register_cpu(int num); void xen_arch_unregister_cpu(int num); #endif +#ifdef CONFIG_PVH +void __init xen_pvh_init(struct boot_params *boot_params); +#endif + #endif /* _ASM_X86_XEN_HYPERVISOR_H */ diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h index 4557f7cb0fa6..9015b888edd6 100644 --- a/arch/x86/include/asm/xen/pci.h +++ b/arch/x86/include/asm/xen/pci.h @@ -22,25 +22,6 @@ static inline int __init pci_xen_initial_domain(void) return -1; } #endif -#ifdef CONFIG_XEN_DOM0 -int xen_find_device_domain_owner(struct pci_dev *dev); -int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain); -int xen_unregister_device_domain_owner(struct pci_dev *dev); -#else -static inline int xen_find_device_domain_owner(struct pci_dev *dev) -{ - return -1; -} -static inline int xen_register_device_domain_owner(struct pci_dev *dev, - uint16_t domain) -{ - return -1; -} -static inline int xen_unregister_device_domain_owner(struct pci_dev *dev) -{ - return -1; -} -#endif #if defined(CONFIG_PCI_MSI) #if defined(CONFIG_PCI_XEN) diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 10562885f5fc..af3ba08b684b 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -73,12 +73,23 @@ static int gart_mem_pfn_is_ram(unsigned long pfn) (pfn >= aperture_pfn_start + aperture_page_count)); } +#ifdef CONFIG_PROC_VMCORE +static bool gart_oldmem_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn) +{ + return !!gart_mem_pfn_is_ram(pfn); +} + +static struct vmcore_cb gart_vmcore_cb = { + .pfn_is_ram = gart_oldmem_pfn_is_ram, +}; +#endif + static void __init exclude_from_core(u64 aper_base, u32 aper_order) { aperture_pfn_start = aper_base >> PAGE_SHIFT; aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT; #ifdef CONFIG_PROC_VMCORE - WARN_ON(register_oldmem_pfn_is_ram(&gart_mem_pfn_is_ram)); + register_vmcore_cb(&gart_vmcore_cb); #endif #ifdef CONFIG_PROC_KCORE WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram)); diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c index d1d49e3d536b..3b58d8703094 100644 --- a/arch/x86/kernel/doublefault_32.c +++ b/arch/x86/kernel/doublefault_32.c @@ -77,9 +77,6 @@ asmlinkage noinstr void __noreturn doublefault_shim(void) * some way to reconstruct CR3. 
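The aperture_64.c hunk above is an instance of the vmcore_cb interface that replaces register_oldmem_pfn_is_ram(): a callback object with a pfn_is_ram hook, registered once at init time. Minimal sketch of the registration shape; the demo_* names are hypothetical.

#include <linux/crash_dump.h>
#include <linux/init.h>
#include <linux/types.h>

static bool demo_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
{
        /* Assumption for the sketch: report every pfn as backed by RAM. */
        return true;
}

static struct vmcore_cb demo_vmcore_cb = {
        .pfn_is_ram = demo_pfn_is_ram,
};

static int __init demo_vmcore_setup(void)
{
        register_vmcore_cb(&demo_vmcore_cb);
        return 0;
}
device_initcall(demo_vmcore_setup);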
We could make a credible guess based * on cpu_tlbstate, but that would be racy and would not account for * PTI. - * - * Instead, don't bother. We can return through - * rewind_stack_do_exit() instead. */ panic("cannot return from double fault\n"); } diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index ebc45360ffd4..7157c2df3bc2 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -46,6 +46,17 @@ asm (".pushsection .entry.text, \"ax\"\n" ".type _paravirt_nop, @function\n\t" ".popsection"); +/* stub always returning 0. */ +asm (".pushsection .entry.text, \"ax\"\n" + ".global paravirt_ret0\n" + "paravirt_ret0:\n\t" + "xor %" _ASM_AX ", %" _ASM_AX ";\n\t" + "ret\n\t" + ".size paravirt_ret0, . - paravirt_ret0\n\t" + ".type paravirt_ret0, @function\n\t" + ".popsection"); + + void __init default_banner(void) { printk(KERN_INFO "Booting paravirtualized kernel on %s\n", @@ -53,7 +64,7 @@ void __init default_banner(void) } /* Undefined instruction for dealing with missing ops pointers. */ -static void paravirt_BUG(void) +noinstr void paravirt_BUG(void) { BUG(); } diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index e6f7592790af..2de3c8c5eba9 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -175,7 +175,7 @@ static struct orc_entry *orc_find(unsigned long ip) } /* vmlinux .init slow lookup: */ - if (init_kernel_text(ip)) + if (is_kernel_inittext(ip)) return __orc_find(__start_orc_unwind_ip, __start_orc_unwind, __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index e5a7a10a0164..f14f69d7aa3c 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -106,10 +106,8 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval) */ local_irq_enable(); - if (!vm86 || !vm86->user_vm86) { - pr_alert("no user_vm86: BAD\n"); - do_exit(SIGSEGV); - } + BUG_ON(!vm86 || !vm86->user_vm86); + set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask); user = vm86->user_vm86; @@ -142,6 +140,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval) user_access_end(); +exit_vm86: preempt_disable(); tsk->thread.sp0 = vm86->saved_sp0; tsk->thread.sysenter_cs = __KERNEL_CS; @@ -161,7 +160,8 @@ Efault_end: user_access_end(); Efault: pr_alert("could not access userspace vm86 info\n"); - do_exit(SIGSEGV); + force_fatal_sig(SIGSEGV); + goto exit_vm86; } static int do_vm86_irq_handling(int subfunction, int irqnumber); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 5cd7ea6d645c..d4e2648a1dfb 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -238,11 +238,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) } } -/* - * The <linux/kallsyms.h> already defines is_kernel_text, - * using '__' prefix not to get in conflict. 
- */ -static inline int __is_kernel_text(unsigned long addr) +static inline int is_x86_32_kernel_text(unsigned long addr) { if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end) return 1; @@ -333,8 +329,8 @@ repeat: addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1; - if (__is_kernel_text(addr) || - __is_kernel_text(addr2)) + if (is_x86_32_kernel_text(addr) || + is_x86_32_kernel_text(addr2)) prot = PAGE_KERNEL_LARGE_EXEC; pages_2m++; @@ -359,7 +355,7 @@ repeat: */ pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR); - if (__is_kernel_text(addr)) + if (is_x86_32_kernel_text(addr)) prot = PAGE_KERNEL_EXEC; pages_4k++; @@ -789,7 +785,7 @@ static void mark_nxdata_nx(void) */ unsigned long start = PFN_ALIGN(_etext); /* - * This comes from __is_kernel_text upper limit. Also HPAGE where used: + * This comes from is_x86_32_kernel_text upper limit. Also HPAGE where used: */ unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start; diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 5debe4ac6f81..12da00558631 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -23,6 +23,7 @@ #include <xen/features.h> #include <xen/events.h> +#include <xen/pci.h> #include <asm/xen/pci.h> #include <asm/xen/cpuid.h> #include <asm/apic.h> @@ -585,78 +586,3 @@ int __init pci_xen_initial_domain(void) } #endif -#ifdef CONFIG_XEN_DOM0 - -struct xen_device_domain_owner { - domid_t domain; - struct pci_dev *dev; - struct list_head list; -}; - -static DEFINE_SPINLOCK(dev_domain_list_spinlock); -static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list); - -static struct xen_device_domain_owner *find_device(struct pci_dev *dev) -{ - struct xen_device_domain_owner *owner; - - list_for_each_entry(owner, &dev_domain_list, list) { - if (owner->dev == dev) - return owner; - } - return NULL; -} - -int xen_find_device_domain_owner(struct pci_dev *dev) -{ - struct xen_device_domain_owner *owner; - int domain = -ENODEV; - - spin_lock(&dev_domain_list_spinlock); - owner = find_device(dev); - if (owner) - domain = owner->domain; - spin_unlock(&dev_domain_list_spinlock); - return domain; -} -EXPORT_SYMBOL_GPL(xen_find_device_domain_owner); - -int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain) -{ - struct xen_device_domain_owner *owner; - - owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL); - if (!owner) - return -ENODEV; - - spin_lock(&dev_domain_list_spinlock); - if (find_device(dev)) { - spin_unlock(&dev_domain_list_spinlock); - kfree(owner); - return -EEXIST; - } - owner->domain = domain; - owner->dev = dev; - list_add_tail(&owner->list, &dev_domain_list); - spin_unlock(&dev_domain_list_spinlock); - return 0; -} -EXPORT_SYMBOL_GPL(xen_register_device_domain_owner); - -int xen_unregister_device_domain_owner(struct pci_dev *dev) -{ - struct xen_device_domain_owner *owner; - - spin_lock(&dev_domain_list_spinlock); - owner = find_device(dev); - if (!owner) { - spin_unlock(&dev_domain_list_spinlock); - return -ENODEV; - } - list_del(&owner->list); - spin_unlock(&dev_domain_list_spinlock); - kfree(owner); - return 0; -} -EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner); -#endif /* CONFIG_XEN_DOM0 */ diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 95d970359e17..30c6e986a6cd 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -31,25 +31,10 @@ EXPORT_SYMBOL_GPL(hypercall_page); * Pointer to the xen_vcpu_info structure or * &HYPERVISOR_shared_info->vcpu_info[cpu]. 
See xen_hvm_init_shared_info * and xen_vcpu_setup for details. By default it points to share_info->vcpu_info - * but if the hypervisor supports VCPUOP_register_vcpu_info then it can point - * to xen_vcpu_info. The pointer is used in __xen_evtchn_do_upcall to - * acknowledge pending events. - * Also more subtly it is used by the patched version of irq enable/disable - * e.g. xen_irq_enable_direct and xen_iret in PV mode. - * - * The desire to be able to do those mask/unmask operations as a single - * instruction by using the per-cpu offset held in %gs is the real reason - * vcpu info is in a per-cpu pointer and the original reason for this - * hypercall. - * + * but during boot it is switched to point to xen_vcpu_info. + * The pointer is used in __xen_evtchn_do_upcall to acknowledge pending events. */ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); - -/* - * Per CPU pages used if hypervisor supports VCPUOP_register_vcpu_info - * hypercall. This can be used both in PV and PVHVM mode. The structure - * overrides the default per_cpu(xen_vcpu, cpu) value. - */ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); /* Linux <-> Xen vCPU id mapping */ @@ -84,21 +69,6 @@ EXPORT_SYMBOL(xen_start_flags); */ struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info; -/* - * Flag to determine whether vcpu info placement is available on all - * VCPUs. We assume it is to start with, and then set it to zero on - * the first failure. This is because it can succeed on some VCPUs - * and not others, since it can involve hypervisor memory allocation, - * or because the guest failed to guarantee all the appropriate - * constraints on all VCPUs (ie buffer can't cross a page boundary). - * - * Note that any particular CPU may be using a placed vcpu structure, - * but we can only optimise if the all are. - * - * 0: not available, 1: available - */ -int xen_have_vcpu_info_placement = 1; - static int xen_cpu_up_online(unsigned int cpu) { xen_init_lock_cpu(cpu); @@ -124,10 +94,8 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int), return rc >= 0 ? 0 : rc; } -static int xen_vcpu_setup_restore(int cpu) +static void xen_vcpu_setup_restore(int cpu) { - int rc = 0; - /* Any per_cpu(xen_vcpu) is stale, so reset it */ xen_vcpu_info_reset(cpu); @@ -136,11 +104,8 @@ static int xen_vcpu_setup_restore(int cpu) * be handled by hotplug. */ if (xen_pv_domain() || - (xen_hvm_domain() && cpu_online(cpu))) { - rc = xen_vcpu_setup(cpu); - } - - return rc; + (xen_hvm_domain() && cpu_online(cpu))) + xen_vcpu_setup(cpu); } /* @@ -150,7 +115,7 @@ static int xen_vcpu_setup_restore(int cpu) */ void xen_vcpu_restore(void) { - int cpu, rc; + int cpu; for_each_possible_cpu(cpu) { bool other_cpu = (cpu != smp_processor_id()); @@ -170,20 +135,9 @@ void xen_vcpu_restore(void) if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock)) xen_setup_runstate_info(cpu); - rc = xen_vcpu_setup_restore(cpu); - if (rc) - pr_emerg_once("vcpu restore failed for cpu=%d err=%d. " - "System will hang.\n", cpu, rc); - /* - * In case xen_vcpu_setup_restore() fails, do not bring up the - * VCPU. This helps us avoid the resulting OOPS when the VCPU - * accesses pvclock_vcpu_time via xen_vcpu (which is NULL.) - * Note that this does not improve the situation much -- now the - * VM hangs instead of OOPSing -- with the VCPUs that did not - * fail, spinning in stop_machine(), waiting for the failed - * VCPUs to come up. 
- */ - if (other_cpu && is_up && (rc == 0) && + xen_vcpu_setup_restore(cpu); + + if (other_cpu && is_up && HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL)) BUG(); } @@ -200,7 +154,7 @@ void xen_vcpu_info_reset(int cpu) } } -int xen_vcpu_setup(int cpu) +void xen_vcpu_setup(int cpu) { struct vcpu_register_vcpu_info info; int err; @@ -221,44 +175,26 @@ int xen_vcpu_setup(int cpu) */ if (xen_hvm_domain()) { if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu)) - return 0; + return; } - if (xen_have_vcpu_info_placement) { - vcpup = &per_cpu(xen_vcpu_info, cpu); - info.mfn = arbitrary_virt_to_mfn(vcpup); - info.offset = offset_in_page(vcpup); - - /* - * Check to see if the hypervisor will put the vcpu_info - * structure where we want it, which allows direct access via - * a percpu-variable. - * N.B. This hypercall can _only_ be called once per CPU. - * Subsequent calls will error out with -EINVAL. This is due to - * the fact that hypervisor has no unregister variant and this - * hypercall does not allow to over-write info.mfn and - * info.offset. - */ - err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, - xen_vcpu_nr(cpu), &info); - - if (err) { - pr_warn_once("register_vcpu_info failed: cpu=%d err=%d\n", - cpu, err); - xen_have_vcpu_info_placement = 0; - } else { - /* - * This cpu is using the registered vcpu info, even if - * later ones fail to. - */ - per_cpu(xen_vcpu, cpu) = vcpup; - } - } + vcpup = &per_cpu(xen_vcpu_info, cpu); + info.mfn = arbitrary_virt_to_mfn(vcpup); + info.offset = offset_in_page(vcpup); - if (!xen_have_vcpu_info_placement) - xen_vcpu_info_reset(cpu); + /* + * N.B. This hypercall can _only_ be called once per CPU. + * Subsequent calls will error out with -EINVAL. This is due to + * the fact that hypervisor has no unregister variant and this + * hypercall does not allow to over-write info.mfn and + * info.offset. + */ + err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu), + &info); + if (err) + panic("register_vcpu_info failed: cpu=%d err=%d\n", cpu, err); - return ((per_cpu(xen_vcpu, cpu) == NULL) ? -ENODEV : 0); + per_cpu(xen_vcpu, cpu) = vcpup; } void __init xen_banner(void) diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index e68ea5f4ad1c..42300941ec29 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c @@ -163,9 +163,9 @@ static int xen_cpu_up_prepare_hvm(unsigned int cpu) per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu); else per_cpu(xen_vcpu_id, cpu) = cpu; - rc = xen_vcpu_setup(cpu); - if (rc || !xen_have_vector_callback) - return rc; + xen_vcpu_setup(cpu); + if (!xen_have_vector_callback) + return 0; if (xen_feature(XENFEAT_hvm_safe_pvclock)) xen_setup_timer(cpu); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 4f63117f09bb..5004feb16783 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -27,7 +27,6 @@ #include <linux/export.h> #include <linux/mm.h> #include <linux/page-flags.h> -#include <linux/highmem.h> #include <linux/pci.h> #include <linux/gfp.h> #include <linux/edd.h> @@ -993,31 +992,13 @@ void __init xen_setup_vcpu_info_placement(void) for_each_possible_cpu(cpu) { /* Set up direct vCPU id mapping for PV guests. */ per_cpu(xen_vcpu_id, cpu) = cpu; - - /* - * xen_vcpu_setup(cpu) can fail -- in which case it - * falls back to the shared_info version for cpus - * where xen_vcpu_nr(cpu) < MAX_VIRT_CPUS. - * - * xen_cpu_up_prepare_pv() handles the rest by failing - * them in hotplug. 
- */ - (void) xen_vcpu_setup(cpu); + xen_vcpu_setup(cpu); } - /* - * xen_vcpu_setup managed to place the vcpu_info within the - * percpu area for all cpus, so make use of it. - */ - if (xen_have_vcpu_info_placement) { - pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct); - pv_ops.irq.irq_disable = - __PV_IS_CALLEE_SAVE(xen_irq_disable_direct); - pv_ops.irq.irq_enable = - __PV_IS_CALLEE_SAVE(xen_irq_enable_direct); - pv_ops.mmu.read_cr2 = - __PV_IS_CALLEE_SAVE(xen_read_cr2_direct); - } + pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct); + pv_ops.irq.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct); + pv_ops.irq.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct); + pv_ops.mmu.read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2_direct); } static const struct pv_info xen_info __initconst = { @@ -1247,12 +1228,6 @@ asmlinkage __visible void __init xen_start_kernel(void) __supported_pte_mask &= ~_PAGE_GLOBAL; __default_kernel_pte_mask &= ~_PAGE_GLOBAL; - /* - * Prevent page tables from being allocated in highmem, even - * if CONFIG_HIGHPTE is enabled. - */ - __userpte_alloc_gfp &= ~__GFP_HIGHMEM; - /* Get mfn list */ xen_build_dynamic_phys_to_machine(); diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c index 4fe387e520af..06c3c2fb4b06 100644 --- a/arch/x86/xen/irq.c +++ b/arch/x86/xen/irq.c @@ -24,60 +24,6 @@ noinstr void xen_force_evtchn_callback(void) (void)HYPERVISOR_xen_version(0, NULL); } -asmlinkage __visible noinstr unsigned long xen_save_fl(void) -{ - struct vcpu_info *vcpu; - unsigned long flags; - - vcpu = this_cpu_read(xen_vcpu); - - /* flag has opposite sense of mask */ - flags = !vcpu->evtchn_upcall_mask; - - /* convert to IF type flag - -0 -> 0x00000000 - -1 -> 0xffffffff - */ - return (-flags) & X86_EFLAGS_IF; -} -__PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl, ".noinstr.text"); - -asmlinkage __visible noinstr void xen_irq_disable(void) -{ - /* There's a one instruction preempt window here. We need to - make sure we're don't switch CPUs between getting the vcpu - pointer and updating the mask. */ - preempt_disable(); - this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1; - preempt_enable_no_resched(); -} -__PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable, ".noinstr.text"); - -asmlinkage __visible noinstr void xen_irq_enable(void) -{ - struct vcpu_info *vcpu; - - /* - * We may be preempted as soon as vcpu->evtchn_upcall_mask is - * cleared, so disable preemption to ensure we check for - * events on the VCPU we are still running on. - */ - preempt_disable(); - - vcpu = this_cpu_read(xen_vcpu); - vcpu->evtchn_upcall_mask = 0; - - /* Doesn't matter if we get preempted here, because any - pending event will get dealt with anyway. */ - - barrier(); /* unmask then check (avoid races) */ - if (unlikely(vcpu->evtchn_upcall_pending)) - xen_force_evtchn_callback(); - - preempt_enable(); -} -__PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable, ".noinstr.text"); - static void xen_safe_halt(void) { /* Blocking includes an implicit local_irq_enable(). */ @@ -96,10 +42,10 @@ static void xen_halt(void) static const typeof(pv_ops) xen_irq_ops __initconst = { .irq = { - - .save_fl = PV_CALLEE_SAVE(xen_save_fl), - .irq_disable = PV_CALLEE_SAVE(xen_irq_disable), - .irq_enable = PV_CALLEE_SAVE(xen_irq_enable), + /* Initial interrupt flag handling only called while interrupts off. 
*/ + .save_fl = __PV_IS_CALLEE_SAVE(paravirt_ret0), + .irq_disable = __PV_IS_CALLEE_SAVE(paravirt_nop), + .irq_enable = __PV_IS_CALLEE_SAVE(paravirt_BUG), .safe_halt = xen_safe_halt, .halt = xen_halt, diff --git a/arch/x86/xen/mmu_hvm.c b/arch/x86/xen/mmu_hvm.c index 57409373750f..509bdee3ab90 100644 --- a/arch/x86/xen/mmu_hvm.c +++ b/arch/x86/xen/mmu_hvm.c @@ -9,39 +9,28 @@ #ifdef CONFIG_PROC_VMCORE /* - * This function is used in two contexts: - * - the kdump kernel has to check whether a pfn of the crashed kernel - * was a ballooned page. vmcore is using this function to decide - * whether to access a pfn of the crashed kernel. - * - the kexec kernel has to check whether a pfn was ballooned by the - * previous kernel. If the pfn is ballooned, handle it properly. - * Returns 0 if the pfn is not backed by a RAM page, the caller may + * The kdump kernel has to check whether a pfn of the crashed kernel + * was a ballooned page. vmcore is using this function to decide + * whether to access a pfn of the crashed kernel. + * Returns "false" if the pfn is not backed by a RAM page, the caller may * handle the pfn special in this case. */ -static int xen_oldmem_pfn_is_ram(unsigned long pfn) +static bool xen_vmcore_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn) { struct xen_hvm_get_mem_type a = { .domid = DOMID_SELF, .pfn = pfn, }; - int ram; - if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a)) - return -ENXIO; - - switch (a.mem_type) { - case HVMMEM_mmio_dm: - ram = 0; - break; - case HVMMEM_ram_rw: - case HVMMEM_ram_ro: - default: - ram = 1; - break; + if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a)) { + pr_warn_once("Unexpected HVMOP_get_mem_type failure\n"); + return true; } - - return ram; + return a.mem_type != HVMMEM_mmio_dm; } +static struct vmcore_cb xen_vmcore_cb = { + .pfn_is_ram = xen_vmcore_pfn_is_ram, +}; #endif static void xen_hvm_exit_mmap(struct mm_struct *mm) @@ -75,6 +64,6 @@ void __init xen_hvm_init_mmu_ops(void) if (is_pagetable_dying_supported()) pv_ops.mmu.exit_mmap = xen_hvm_exit_mmap; #ifdef CONFIG_PROC_VMCORE - WARN_ON(register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram)); + register_vmcore_cb(&xen_vmcore_cb); #endif } diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index cdbf4822f431..00354866921b 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -41,7 +41,6 @@ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 */ #include <linux/sched/mm.h> -#include <linux/highmem.h> #include <linux/debugfs.h> #include <linux/bug.h> #include <linux/vmalloc.h> @@ -86,8 +85,10 @@ #include "mmu.h" #include "debugfs.h" +#ifdef CONFIG_X86_VSYSCALL_EMULATION /* l3 pud for userspace vsyscall mapping */ static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss; +#endif /* * Protects atomic reservation decrease/increase against concurrent increases. @@ -241,9 +242,11 @@ static void xen_set_pmd(pmd_t *ptr, pmd_t val) * Associate a virtual page frame with a given physical page frame * and protection flags for that frame. 
*/ -void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags) +void __init set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags) { - set_pte_vaddr(vaddr, mfn_pte(mfn, flags)); + if (HYPERVISOR_update_va_mapping(vaddr, mfn_pte(mfn, flags), + UVMF_INVLPG)) + BUG(); } static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval) @@ -789,7 +792,9 @@ static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page, static void __init xen_after_bootmem(void) { static_branch_enable(&xen_struct_pages_ready); +#ifdef CONFIG_X86_VSYSCALL_EMULATION SetPagePinned(virt_to_page(level3_user_vsyscall)); +#endif xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP); } @@ -1192,6 +1197,13 @@ static void __init xen_pagetable_p2m_setup(void) static void __init xen_pagetable_init(void) { + /* + * The majority of further PTE writes is to pagetables already + * announced as such to Xen. Hence it is more efficient to use + * hypercalls for these updates. + */ + pv_ops.mmu.set_pte = __xen_set_pte; + paging_init(); xen_post_allocator_init(); @@ -1421,10 +1433,18 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) * * Many of these PTE updates are done on unpinned and writable pages * and doing a hypercall for these is unnecessary and expensive. At - * this point it is not possible to tell if a page is pinned or not, - * so always write the PTE directly and rely on Xen trapping and + * this point it is rarely possible to tell if a page is pinned, so + * mostly write the PTE directly and rely on Xen trapping and * emulating any updates as necessary. */ +static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) +{ + if (unlikely(is_early_ioremap_ptep(ptep))) + __xen_set_pte(ptep, pte); + else + native_set_pte(ptep, pte); +} + __visible pte_t xen_make_pte_init(pteval_t pte) { unsigned long pfn; @@ -1446,11 +1466,6 @@ __visible pte_t xen_make_pte_init(pteval_t pte) } PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init); -static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) -{ - __xen_set_pte(ptep, pte); -} - /* Early in boot, while setting up the initial pagetable, assume everything is pinned. */ static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) @@ -1750,7 +1765,6 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) set_page_prot(init_top_pgt, PAGE_KERNEL_RO); set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); - set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); @@ -1767,6 +1781,13 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) /* Unpin Xen-provided one */ pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); +#ifdef CONFIG_X86_VSYSCALL_EMULATION + /* Pin user vsyscall L3 */ + set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); + pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, + PFN_DOWN(__pa_symbol(level3_user_vsyscall))); +#endif + /* * At this stage there can be no user pgd, and no page structure to * attach it to, so make sure we just set kernel pgd. 
@@ -1999,6 +2020,7 @@ static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) { pte_t pte; + unsigned long vaddr; phys >>= PAGE_SHIFT; @@ -2039,15 +2061,15 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) break; } - __native_set_fixmap(idx, pte); + vaddr = __fix_to_virt(idx); + if (HYPERVISOR_update_va_mapping(vaddr, pte, UVMF_INVLPG)) + BUG(); #ifdef CONFIG_X86_VSYSCALL_EMULATION /* Replicate changes to map the vsyscall page into the user pagetable vsyscall mapping. */ - if (idx == VSYSCALL_PAGE) { - unsigned long vaddr = __fix_to_virt(idx); + if (idx == VSYSCALL_PAGE) set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte); - } #endif } diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index f387fc7e5250..af216feb63d9 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -306,10 +306,6 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn) BUG(); } - /* Update kernel mapping, but not for highmem. */ - if (pfn >= PFN_UP(__pa(high_memory - 1))) - return; - if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT), mfn_pte(mfn, PAGE_KERNEL), 0)) { WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n", @@ -429,13 +425,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk( } /* - * If the PFNs are currently mapped, the VA mapping also needs - * to be updated to be 1:1. + * If the PFNs are currently mapped, their VA mappings need to be + * zapped. */ for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) (void)HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), - mfn_pte(pfn, PAGE_KERNEL_IO), 0); + native_make_pte(0), 0); return remap_pfn; } diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index c1b2f764b29a..c3e1f9a7d43a 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -121,34 +121,10 @@ int xen_smp_intr_init(unsigned int cpu) void __init xen_smp_cpus_done(unsigned int max_cpus) { - int cpu, rc, count = 0; - if (xen_hvm_domain()) native_smp_cpus_done(max_cpus); else calculate_max_logical_packages(); - - if (xen_have_vcpu_info_placement) - return; - - for_each_online_cpu(cpu) { - if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS) - continue; - - rc = remove_cpu(cpu); - - if (rc == 0) { - /* - * Reset vcpu_info so this cpu cannot be onlined again. 
- */ - xen_vcpu_info_reset(cpu); - count++; - } else { - pr_warn("%s: failed to bring CPU %d down, error %d\n", - __func__, cpu, rc); - } - } - WARN(count, "%s: brought %d CPUs offline\n", __func__, count); } void xen_smp_send_reschedule(int cpu) @@ -268,20 +244,16 @@ void xen_send_IPI_allbutself(int vector) static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) { - irq_enter(); generic_smp_call_function_interrupt(); inc_irq_stat(irq_call_count); - irq_exit(); return IRQ_HANDLED; } static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id) { - irq_enter(); generic_smp_call_function_single_interrupt(); inc_irq_stat(irq_call_count); - irq_exit(); return IRQ_HANDLED; } diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index 7ed56c6075b0..9e55bcbfcd33 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -458,10 +458,8 @@ static void xen_pv_stop_other_cpus(int wait) static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id) { - irq_enter(); irq_work_run(); inc_irq_stat(apic_irq_work_irqs); - irq_exit(); return IRQ_HANDLED; } diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index 9e27b86a0c31..6a64496edefb 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -45,13 +45,13 @@ SYM_CODE_START(startup_xen) /* Clear .bss */ xor %eax,%eax - mov $__bss_start, %_ASM_DI - mov $__bss_stop, %_ASM_CX - sub %_ASM_DI, %_ASM_CX - shr $__ASM_SEL(2, 3), %_ASM_CX - rep __ASM_SIZE(stos) + mov $__bss_start, %rdi + mov $__bss_stop, %rcx + sub %rdi, %rcx + shr $3, %rcx + rep stosq - mov %_ASM_SI, xen_start_info + mov %rsi, xen_start_info mov initial_stack(%rip), %rsp /* Set up %gs. diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 8bc8b72a205d..fd0fec6e92f4 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -76,9 +76,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id); bool xen_vcpu_stolen(int vcpu); -extern int xen_have_vcpu_info_placement; - -int xen_vcpu_setup(int cpu); +void xen_vcpu_setup(int cpu); void xen_vcpu_info_reset(int cpu); void xen_setup_vcpu_info_placement(void); diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile index 96714ef7c89e..9778216d6e09 100644 --- a/arch/xtensa/Makefile +++ b/arch/xtensa/Makefile @@ -7,9 +7,7 @@ # Copyright (C) 2014 Cadence Design Systems Inc. # # This file is included by the global makefile so that you can add your own -# architecture-specific flags and dependencies. Remember to do have actions -# for "archclean" and "archdep" for cleaning up and making dependencies for -# this architecture +# architecture-specific flags and dependencies. # Core configuration. # (Use VAR=<xtensa_config> to use another default compiler.) 
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S index 99e98c9bae41..2dd28931d699 100644 --- a/arch/xtensa/boot/boot-elf/bootstrap.S +++ b/arch/xtensa/boot/boot-elf/bootstrap.S @@ -42,12 +42,14 @@ _bootparam: .align 4 _SetupMMU: +#if XCHAL_HAVE_WINDOWED movi a0, 0 wsr a0, windowbase rsync movi a0, 1 wsr a0, windowstart rsync +#endif movi a0, 0x1F wsr a0, ps rsync diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S index 48ba5a232d94..3ed94ad35000 100644 --- a/arch/xtensa/boot/boot-redboot/bootstrap.S +++ b/arch/xtensa/boot/boot-redboot/bootstrap.S @@ -3,6 +3,7 @@ #include <asm/regs.h> #include <asm/asmmacro.h> #include <asm/cacheasm.h> +#include <asm/processor.h> /* * RB-Data: RedBoot data/bss * P: Boot-Parameters @@ -36,7 +37,7 @@ .globl __start /* this must be the first byte of the loader! */ __start: - entry sp, 32 # we do not intend to return + abi_entry(32) # we do not intend to return _call0 _start __start_a0: .align 4 @@ -55,17 +56,19 @@ _start: movi a4, 1 wsr a4, ps rsync - +#if XCHAL_HAVE_WINDOWED rsr a5, windowbase ssl a5 sll a4, a4 wsr a4, windowstart rsync - - movi a4, 0x00040000 +#endif + movi a4, KERNEL_PS_WOE_MASK wsr a4, ps rsync +KABI_C0 mov abi_saved0, abi_arg0 + /* copy the loader to its address * Note: The loader itself is a very small piece, so we assume we * don't partially overlap. We also assume (even more important) @@ -168,52 +171,52 @@ _reloc: movi a3, __image_load sub a4, a3, a4 - add a8, a0, a4 + add abi_arg2, a0, a4 # a1 Stack # a8(a4) Load address of the image - movi a6, _image_start - movi a10, _image_end - movi a7, 0x1000000 - sub a11, a10, a6 - movi a9, complen - s32i a11, a9, 0 + movi abi_arg0, _image_start + movi abi_arg4, _image_end + movi abi_arg1, 0x1000000 + sub abi_tmp0, abi_arg4, abi_arg0 + movi abi_arg3, complen + s32i abi_tmp0, abi_arg3, 0 movi a0, 0 - # a6 destination - # a7 maximum size of destination - # a8 source - # a9 ptr to length + # abi_arg0 destination + # abi_arg1 maximum size of destination + # abi_arg2 source + # abi_arg3 ptr to length .extern gunzip - movi a4, gunzip - beqz a4, 1f + movi abi_tmp0, gunzip + beqz abi_tmp0, 1f - callx4 a4 + abi_callx abi_tmp0 j 2f - # a6 destination start - # a7 maximum size of destination - # a8 source start - # a9 ptr to length - # a10 destination end + # abi_arg0 destination start + # abi_arg1 maximum size of destination + # abi_arg2 source start + # abi_arg3 ptr to length + # abi_arg4 destination end 1: - l32i a9, a8, 0 - l32i a11, a8, 4 - s32i a9, a6, 0 - s32i a11, a6, 4 - l32i a9, a8, 8 - l32i a11, a8, 12 - s32i a9, a6, 8 - s32i a11, a6, 12 - addi a6, a6, 16 - addi a8, a8, 16 - blt a6, a10, 1b + l32i abi_tmp0, abi_arg2, 0 + l32i abi_tmp1, abi_arg2, 4 + s32i abi_tmp0, abi_arg0, 0 + s32i abi_tmp1, abi_arg0, 4 + l32i abi_tmp0, abi_arg2, 8 + l32i abi_tmp1, abi_arg2, 12 + s32i abi_tmp0, abi_arg0, 8 + s32i abi_tmp1, abi_arg0, 12 + addi abi_arg0, abi_arg0, 16 + addi abi_arg2, abi_arg2, 16 + blt abi_arg0, abi_arg4, 1b /* jump to the kernel */ @@ -230,6 +233,7 @@ _reloc: # a2 Boot parameter list +KABI_C0 mov abi_arg0, abi_saved0 movi a0, _image_start jx a0 diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h index bfc89e11f469..809c507d1825 100644 --- a/arch/xtensa/include/asm/asmmacro.h +++ b/arch/xtensa/include/asm/asmmacro.h @@ -194,6 +194,12 @@ #define XTENSA_STACK_ALIGNMENT 16 #if defined(__XTENSA_WINDOWED_ABI__) + +/* Assembly instructions for windowed kernel ABI. 
*/ +#define KABI_W +/* Assembly instructions for call0 kernel ABI (will be ignored). */ +#define KABI_C0 # + #define XTENSA_FRAME_SIZE_RESERVE 16 #define XTENSA_SPILL_STACK_RESERVE 32 @@ -206,8 +212,34 @@ #define abi_ret(frame_size) retw #define abi_ret_default retw + /* direct call */ +#define abi_call call4 + /* indirect call */ +#define abi_callx callx4 + /* outgoing call argument registers */ +#define abi_arg0 a6 +#define abi_arg1 a7 +#define abi_arg2 a8 +#define abi_arg3 a9 +#define abi_arg4 a10 +#define abi_arg5 a11 + /* return value */ +#define abi_rv a6 + /* registers preserved across call */ +#define abi_saved0 a2 +#define abi_saved1 a3 + + /* none of the above */ +#define abi_tmp0 a4 +#define abi_tmp1 a5 + #elif defined(__XTENSA_CALL0_ABI__) +/* Assembly instructions for windowed kernel ABI (will be ignored). */ +#define KABI_W # +/* Assembly instructions for call0 kernel ABI. */ +#define KABI_C0 + #define XTENSA_SPILL_STACK_RESERVE 0 #define abi_entry(frame_size) __abi_entry (frame_size) @@ -233,10 +265,43 @@ #define abi_ret_default ret + /* direct call */ +#define abi_call call0 + /* indirect call */ +#define abi_callx callx0 + /* outgoing call argument registers */ +#define abi_arg0 a2 +#define abi_arg1 a3 +#define abi_arg2 a4 +#define abi_arg3 a5 +#define abi_arg4 a6 +#define abi_arg5 a7 + /* return value */ +#define abi_rv a2 + /* registers preserved across call */ +#define abi_saved0 a12 +#define abi_saved1 a13 + + /* none of the above */ +#define abi_tmp0 a8 +#define abi_tmp1 a9 + #else #error Unsupported Xtensa ABI #endif +#if defined(USER_SUPPORT_WINDOWED) +/* Assembly instructions for windowed user ABI. */ +#define UABI_W +/* Assembly instructions for call0 user ABI (will be ignored). */ +#define UABI_C0 # +#else +/* Assembly instructions for windowed user ABI (will be ignored). */ +#define UABI_W # +/* Assembly instructions for call0 user ABI. */ +#define UABI_C0 +#endif + #define __XTENSA_HANDLER .section ".exception.text", "ax" #endif /* _XTENSA_ASMMACRO_H */ diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 4361fe4247e3..52da614f953c 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -25,15 +25,15 @@ * * Locking interrupts looks like this: * - * rsil a15, TOPLEVEL + * rsil a14, TOPLEVEL * <code> - * wsr a15, PS + * wsr a14, PS * rsync * - * Note that a15 is used here because the register allocation + * Note that a14 is used here because the register allocation * done by the compiler is not guaranteed and a window overflow * may not occur between the rsil and wsr instructions. By using - * a15 in the rsil, the machine is guaranteed to be in a state + * a14 in the rsil, the machine is guaranteed to be in a state * where no register reference will cause an overflow. 
*/ @@ -185,15 +185,15 @@ static inline void arch_atomic_##op(int i, atomic_t * v) \ unsigned int vval; \ \ __asm__ __volatile__( \ - " rsil a15, "__stringify(TOPLEVEL)"\n" \ + " rsil a14, "__stringify(TOPLEVEL)"\n" \ " l32i %[result], %[mem]\n" \ " " #op " %[result], %[result], %[i]\n" \ " s32i %[result], %[mem]\n" \ - " wsr a15, ps\n" \ + " wsr a14, ps\n" \ " rsync\n" \ : [result] "=&a" (vval), [mem] "+m" (*v) \ : [i] "a" (i) \ - : "a15", "memory" \ + : "a14", "memory" \ ); \ } \ @@ -203,15 +203,15 @@ static inline int arch_atomic_##op##_return(int i, atomic_t * v) \ unsigned int vval; \ \ __asm__ __volatile__( \ - " rsil a15,"__stringify(TOPLEVEL)"\n" \ + " rsil a14,"__stringify(TOPLEVEL)"\n" \ " l32i %[result], %[mem]\n" \ " " #op " %[result], %[result], %[i]\n" \ " s32i %[result], %[mem]\n" \ - " wsr a15, ps\n" \ + " wsr a14, ps\n" \ " rsync\n" \ : [result] "=&a" (vval), [mem] "+m" (*v) \ : [i] "a" (i) \ - : "a15", "memory" \ + : "a14", "memory" \ ); \ \ return vval; \ @@ -223,16 +223,16 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \ unsigned int tmp, vval; \ \ __asm__ __volatile__( \ - " rsil a15,"__stringify(TOPLEVEL)"\n" \ + " rsil a14,"__stringify(TOPLEVEL)"\n" \ " l32i %[result], %[mem]\n" \ " " #op " %[tmp], %[result], %[i]\n" \ " s32i %[tmp], %[mem]\n" \ - " wsr a15, ps\n" \ + " wsr a14, ps\n" \ " rsync\n" \ : [result] "=&a" (vval), [tmp] "=&a" (tmp), \ [mem] "+m" (*v) \ : [i] "a" (i) \ - : "a15", "memory" \ + : "a14", "memory" \ ); \ \ return vval; \ diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h index 3699e2818efb..eb87810357ad 100644 --- a/arch/xtensa/include/asm/cmpxchg.h +++ b/arch/xtensa/include/asm/cmpxchg.h @@ -52,16 +52,16 @@ __cmpxchg_u32(volatile int *p, int old, int new) return new; #else __asm__ __volatile__( - " rsil a15, "__stringify(TOPLEVEL)"\n" + " rsil a14, "__stringify(TOPLEVEL)"\n" " l32i %[old], %[mem]\n" " bne %[old], %[cmp], 1f\n" " s32i %[new], %[mem]\n" "1:\n" - " wsr a15, ps\n" + " wsr a14, ps\n" " rsync\n" : [old] "=&a" (old), [mem] "+m" (*p) : [cmp] "a" (old), [new] "r" (new) - : "a15", "memory"); + : "a14", "memory"); return old; #endif } @@ -116,10 +116,10 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, /* * xchg_u32 * - * Note that a15 is used here because the register allocation + * Note that a14 is used here because the register allocation * done by the compiler is not guaranteed and a window overflow * may not occur between the rsil and wsr instructions. By using - * a15 in the rsil, the machine is guaranteed to be in a state + * a14 in the rsil, the machine is guaranteed to be in a state * where no register reference will cause an overflow. 
*/ @@ -157,14 +157,14 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val) #else unsigned long tmp; __asm__ __volatile__( - " rsil a15, "__stringify(TOPLEVEL)"\n" + " rsil a14, "__stringify(TOPLEVEL)"\n" " l32i %[tmp], %[mem]\n" " s32i %[val], %[mem]\n" - " wsr a15, ps\n" + " wsr a14, ps\n" " rsync\n" : [tmp] "=&a" (tmp), [mem] "+m" (*m) : [val] "a" (val) - : "a15", "memory"); + : "a14", "memory"); return tmp; #endif } diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h index 5590b0f68837..9138077e567d 100644 --- a/arch/xtensa/include/asm/core.h +++ b/arch/xtensa/include/asm/core.h @@ -26,4 +26,15 @@ #define XCHAL_SPANNING_WAY 0 #endif +#if XCHAL_HAVE_WINDOWED +#if defined(CONFIG_USER_ABI_DEFAULT) || defined(CONFIG_USER_ABI_CALL0_PROBE) +/* Whether windowed ABI is supported in userspace. */ +#define USER_SUPPORT_WINDOWED +#endif +#if defined(__XTENSA_WINDOWED_ABI__) || defined(USER_SUPPORT_WINDOWED) +/* Whether windowed ABI is supported either in userspace or in the kernel. */ +#define SUPPORT_WINDOWED +#endif +#endif + #endif diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index ad15fbc57283..37d3e9887fe7 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -18,12 +18,6 @@ #include <asm/types.h> #include <asm/regs.h> -/* Assertions. */ - -#if (XCHAL_HAVE_WINDOWED != 1) -# error Linux requires the Xtensa Windowed Registers Option. -#endif - /* Xtensa ABI requires stack alignment to be at least 16 */ #define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16) @@ -105,8 +99,18 @@ #define WSBITS (XCHAL_NUM_AREGS / 4) /* width of WINDOWSTART in bits */ #define WBBITS (XCHAL_NUM_AREGS_LOG2 - 2) /* width of WINDOWBASE in bits */ +#if defined(__XTENSA_WINDOWED_ABI__) +#define KERNEL_PS_WOE_MASK PS_WOE_MASK +#elif defined(__XTENSA_CALL0_ABI__) +#define KERNEL_PS_WOE_MASK 0 +#else +#error Unsupported xtensa ABI +#endif + #ifndef __ASSEMBLY__ +#if defined(__XTENSA_WINDOWED_ABI__) + /* Build a valid return address for the specified call winsize. * winsize must be 1 (call4), 2 (call8), or 3 (call12) */ @@ -117,6 +121,22 @@ */ #define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000)) +#elif defined(__XTENSA_CALL0_ABI__) + +/* Build a valid return address for the specified call winsize. + * winsize must be 1 (call4), 2 (call8), or 3 (call12) + */ +#define MAKE_RA_FOR_CALL(ra, ws) (ra) + +/* Convert return address to a valid pc + * Note: We assume that the stack pointer is in the same 1GB ranges as the ra + */ +#define MAKE_PC_FROM_RA(ra, sp) (ra) + +#else +#error Unsupported Xtensa ABI +#endif + /* Spill slot location for the register reg in the spill area under the stack * pointer sp. reg must be in the range [0..4). 
*/ diff --git a/arch/xtensa/include/asm/sections.h b/arch/xtensa/include/asm/sections.h new file mode 100644 index 000000000000..a8c42d08e281 --- /dev/null +++ b/arch/xtensa/include/asm/sections.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _XTENSA_SECTIONS_H +#define _XTENSA_SECTIONS_H + +#include <asm-generic/sections.h> + +#ifdef CONFIG_VECTORS_ADDR +extern char _WindowVectors_text_start[]; +extern char _WindowVectors_text_end[]; +extern char _DebugInterruptVector_text_start[]; +extern char _DebugInterruptVector_text_end[]; +extern char _KernelExceptionVector_text_start[]; +extern char _KernelExceptionVector_text_end[]; +extern char _UserExceptionVector_text_start[]; +extern char _UserExceptionVector_text_end[]; +extern char _DoubleExceptionVector_text_start[]; +extern char _DoubleExceptionVector_text_end[]; +extern char _exception_text_start[]; +extern char _exception_text_end[]; +extern char _Level2InterruptVector_text_start[]; +extern char _Level2InterruptVector_text_end[]; +extern char _Level3InterruptVector_text_start[]; +extern char _Level3InterruptVector_text_end[]; +extern char _Level4InterruptVector_text_start[]; +extern char _Level4InterruptVector_text_end[]; +extern char _Level5InterruptVector_text_start[]; +extern char _Level5InterruptVector_text_end[]; +extern char _Level6InterruptVector_text_start[]; +extern char _Level6InterruptVector_text_end[]; +#endif +#ifdef CONFIG_SMP +extern char _SecondaryResetVector_text_start[]; +extern char _SecondaryResetVector_text_end[]; +#endif +#ifdef CONFIG_XIP_KERNEL +extern char _xip_start[]; +extern char _xip_end[]; +#endif + +#endif diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index f9a671cbf933..5ee974bf8330 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h @@ -68,17 +68,6 @@ static inline void syscall_get_arguments(struct task_struct *task, args[i] = regs->areg[reg[i]]; } -static inline void syscall_set_arguments(struct task_struct *task, - struct pt_regs *regs, - const unsigned long *args) -{ - static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS; - unsigned int i; - - for (i = 0; i < 6; ++i) - regs->areg[reg[i]] = args[i]; -} - asmlinkage long xtensa_rt_sigreturn(void); asmlinkage long xtensa_shmat(int, char __user *, int); asmlinkage long xtensa_fadvise64_64(int, int, diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h index f720a57d0a5b..6fa47cd8e02d 100644 --- a/arch/xtensa/include/asm/traps.h +++ b/arch/xtensa/include/asm/traps.h @@ -56,6 +56,7 @@ void secondary_trap_init(void); static inline void spill_registers(void) { +#if defined(__XTENSA_WINDOWED_ABI__) #if XCHAL_NUM_AREGS > 16 __asm__ __volatile__ ( " call8 1f\n" @@ -96,6 +97,7 @@ static inline void spill_registers(void) " mov a12, a12\n" : : : "memory"); #endif +#endif } struct debug_table { diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S index 9301452e521e..d062c732ef18 100644 --- a/arch/xtensa/kernel/align.S +++ b/arch/xtensa/kernel/align.S @@ -58,7 +58,9 @@ * BE shift left / mask 0 0 X X */ +#if XCHAL_HAVE_WINDOWED #define UNALIGNED_USER_EXCEPTION +#endif #if XCHAL_HAVE_BE diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 647b162f959b..99ab3c1a3387 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -158,6 +158,7 @@ _user_exception: /* Rotate ws so that the current windowbase is at bit0. */ /* Assume ws = xxwww1yyyy. 
Rotate ws right, so that a2 = yyyyxxwww1 */ +#if defined(USER_SUPPORT_WINDOWED) rsr a2, windowbase rsr a3, windowstart ssr a2 @@ -167,24 +168,33 @@ _user_exception: src a2, a3, a2 srli a2, a2, 32-WSBITS s32i a2, a1, PT_WMASK # needed for restoring registers +#else + movi a2, 0 + movi a3, 1 + s32i a2, a1, PT_WINDOWBASE + s32i a3, a1, PT_WINDOWSTART + s32i a3, a1, PT_WMASK +#endif /* Save only live registers. */ - _bbsi.l a2, 1, 1f +UABI_W _bbsi.l a2, 1, 1f s32i a4, a1, PT_AREG4 s32i a5, a1, PT_AREG5 s32i a6, a1, PT_AREG6 s32i a7, a1, PT_AREG7 - _bbsi.l a2, 2, 1f +UABI_W _bbsi.l a2, 2, 1f s32i a8, a1, PT_AREG8 s32i a9, a1, PT_AREG9 s32i a10, a1, PT_AREG10 s32i a11, a1, PT_AREG11 - _bbsi.l a2, 3, 1f +UABI_W _bbsi.l a2, 3, 1f s32i a12, a1, PT_AREG12 s32i a13, a1, PT_AREG13 s32i a14, a1, PT_AREG14 s32i a15, a1, PT_AREG15 + +#if defined(USER_SUPPORT_WINDOWED) _bnei a2, 1, 1f # only one valid frame? /* Only one valid frame, skip saving regs. */ @@ -239,7 +249,7 @@ _user_exception: rsync /* We are back to the original stack pointer (a1) */ - +#endif 2: /* Now, jump to the common exception handler. */ j common_exception @@ -295,6 +305,7 @@ _kernel_exception: s32i a3, a1, PT_SAR s32i a2, a1, PT_ICOUNTLEVEL +#if defined(__XTENSA_WINDOWED_ABI__) /* Rotate ws so that the current windowbase is at bit0. */ /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ @@ -305,27 +316,28 @@ _kernel_exception: src a2, a3, a2 srli a2, a2, 32-WSBITS s32i a2, a1, PT_WMASK # needed for kernel_exception_exit +#endif /* Save only the live window-frame */ - _bbsi.l a2, 1, 1f +KABI_W _bbsi.l a2, 1, 1f s32i a4, a1, PT_AREG4 s32i a5, a1, PT_AREG5 s32i a6, a1, PT_AREG6 s32i a7, a1, PT_AREG7 - _bbsi.l a2, 2, 1f +KABI_W _bbsi.l a2, 2, 1f s32i a8, a1, PT_AREG8 s32i a9, a1, PT_AREG9 s32i a10, a1, PT_AREG10 s32i a11, a1, PT_AREG11 - _bbsi.l a2, 3, 1f +KABI_W _bbsi.l a2, 3, 1f s32i a12, a1, PT_AREG12 s32i a13, a1, PT_AREG13 s32i a14, a1, PT_AREG14 s32i a15, a1, PT_AREG15 +#ifdef __XTENSA_WINDOWED_ABI__ _bnei a2, 1, 1f - /* Copy spill slots of a0 and a1 to imitate movsp * in order to keep exception stack continuous */ @@ -333,6 +345,7 @@ _kernel_exception: l32i a0, a1, PT_SIZE + 4 s32e a3, a1, -16 s32e a0, a1, -12 +#endif 1: l32i a0, a1, PT_AREG0 # restore saved a0 wsr a0, depc @@ -419,16 +432,16 @@ common_exception: movi a3, LOCKLEVEL .Lexception: - movi a0, PS_WOE_MASK - or a3, a3, a0 +KABI_W movi a0, PS_WOE_MASK +KABI_W or a3, a3, a0 #else addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT movi a0, LOCKLEVEL extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH # a3 = PS.INTLEVEL moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt - movi a2, PS_WOE_MASK - or a3, a3, a2 +KABI_W movi a2, PS_WOE_MASK +KABI_W or a3, a3, a2 rsr a2, exccause #endif @@ -461,14 +474,14 @@ common_exception: */ rsr a4, excsave1 - mov a6, a1 # pass stack frame - mov a7, a2 # pass EXCCAUSE addx4 a4, a2, a4 l32i a4, a4, EXC_TABLE_DEFAULT # load handler + mov abi_arg1, a2 # pass EXCCAUSE + mov abi_arg0, a1 # pass stack frame /* Call the second-level handler */ - callx4 a4 + abi_callx a4 /* Jump here for exception exit */ .global common_exception_return @@ -482,15 +495,15 @@ common_exception_return: 1: irq_save a2, a3 #ifdef CONFIG_TRACE_IRQFLAGS - call4 trace_hardirqs_off + abi_call trace_hardirqs_off #endif /* Jump if we are returning from kernel exceptions. 
*/ - l32i a3, a1, PT_PS + l32i abi_saved1, a1, PT_PS GET_THREAD_INFO(a2, a1) l32i a4, a2, TI_FLAGS - _bbci.l a3, PS_UM_BIT, 6f + _bbci.l abi_saved1, PS_UM_BIT, 6f /* Specific to a user exception exit: * We need to check some flags for signal handling and rescheduling, @@ -509,20 +522,20 @@ common_exception_return: /* Call do_signal() */ #ifdef CONFIG_TRACE_IRQFLAGS - call4 trace_hardirqs_on + abi_call trace_hardirqs_on #endif rsil a2, 0 - mov a6, a1 - call4 do_notify_resume # int do_notify_resume(struct pt_regs*) + mov abi_arg0, a1 + abi_call do_notify_resume # int do_notify_resume(struct pt_regs*) j 1b 3: /* Reschedule */ #ifdef CONFIG_TRACE_IRQFLAGS - call4 trace_hardirqs_on + abi_call trace_hardirqs_on #endif rsil a2, 0 - call4 schedule # void schedule (void) + abi_call schedule # void schedule (void) j 1b #ifdef CONFIG_PREEMPTION @@ -533,33 +546,33 @@ common_exception_return: l32i a4, a2, TI_PRE_COUNT bnez a4, 4f - call4 preempt_schedule_irq + abi_call preempt_schedule_irq j 4f #endif #if XTENSA_FAKE_NMI .LNMIexit: - l32i a3, a1, PT_PS - _bbci.l a3, PS_UM_BIT, 4f + l32i abi_saved1, a1, PT_PS + _bbci.l abi_saved1, PS_UM_BIT, 4f #endif 5: #ifdef CONFIG_HAVE_HW_BREAKPOINT _bbci.l a4, TIF_DB_DISABLED, 7f - call4 restore_dbreak + abi_call restore_dbreak 7: #endif #ifdef CONFIG_DEBUG_TLB_SANITY l32i a4, a1, PT_DEPC bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f - call4 check_tlb_sanity + abi_call check_tlb_sanity #endif 6: 4: #ifdef CONFIG_TRACE_IRQFLAGS - extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH + extui a4, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH bgei a4, LOCKLEVEL, 1f - call4 trace_hardirqs_on + abi_call trace_hardirqs_on 1: #endif /* Restore optional registers. */ @@ -572,14 +585,15 @@ common_exception_return: l32i a2, a1, PT_SCOMPARE1 wsr a2, scompare1 #endif - wsr a3, ps /* disable interrupts */ + wsr abi_saved1, ps /* disable interrupts */ - _bbci.l a3, PS_UM_BIT, kernel_exception_exit + _bbci.l abi_saved1, PS_UM_BIT, kernel_exception_exit user_exception_exit: /* Restore the state of the task and return from the exception. */ +#if defined(USER_SUPPORT_WINDOWED) /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */ l32i a2, a1, PT_WINDOWBASE @@ -634,8 +648,10 @@ user_exception_exit: * frame where we had loaded a2), or at least the lower 4 bits * (if we have restored WSBITS-1 frames). */ - 2: +#else + movi a2, 1 +#endif #if XCHAL_HAVE_THREADPTR l32i a3, a1, PT_THREADPTR wur a3, threadptr @@ -650,6 +666,7 @@ user_exception_exit: kernel_exception_exit: +#if defined(__XTENSA_WINDOWED_ABI__) /* Check if we have to do a movsp. 
* * We only have to do a movsp if the previous window-frame has @@ -702,6 +719,9 @@ kernel_exception_exit: * * Note: We expect a2 to hold PT_WMASK */ +#else + movi a2, 1 +#endif common_exception_exit: @@ -920,14 +940,16 @@ unrecoverable_text: ENTRY(unrecoverable_exception) +#if XCHAL_HAVE_WINDOWED movi a0, 1 movi a1, 0 wsr a0, windowstart wsr a1, windowbase rsync +#endif - movi a1, PS_WOE_MASK | LOCKLEVEL + movi a1, KERNEL_PS_WOE_MASK | LOCKLEVEL wsr a1, ps rsync @@ -935,8 +957,8 @@ ENTRY(unrecoverable_exception) movi a0, 0 addi a1, a1, PT_REGS_OFFSET - movi a6, unrecoverable_text - call4 panic + movi abi_arg0, unrecoverable_text + abi_call panic 1: j 1b @@ -947,6 +969,7 @@ ENDPROC(unrecoverable_exception) __XTENSA_HANDLER .literal_position +#ifdef SUPPORT_WINDOWED /* * Fast-handler for alloca exceptions * @@ -1010,6 +1033,7 @@ ENTRY(fast_alloca) 8: j _WindowUnderflow8 4: j _WindowUnderflow4 ENDPROC(fast_alloca) +#endif #ifdef CONFIG_USER_ABI_CALL0_PROBE /* @@ -1206,7 +1230,8 @@ ENDPROC(fast_syscall_xtensa) * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler. */ -#ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS +#if defined(CONFIG_FAST_SYSCALL_SPILL_REGISTERS) && \ + defined(USER_SUPPORT_WINDOWED) ENTRY(fast_syscall_spill_registers) @@ -1403,12 +1428,12 @@ ENTRY(fast_syscall_spill_registers) rsr a3, excsave1 l32i a1, a3, EXC_TABLE_KSTK - movi a4, PS_WOE_MASK | LOCKLEVEL + movi a4, KERNEL_PS_WOE_MASK | LOCKLEVEL wsr a4, ps rsync - movi a6, SIGSEGV - call4 do_exit + movi abi_arg0, SIGSEGV + abi_call do_exit /* shouldn't return, so panic */ @@ -1887,57 +1912,77 @@ ENDPROC(fast_store_prohibited) ENTRY(system_call) +#if defined(__XTENSA_WINDOWED_ABI__) abi_entry_default +#elif defined(__XTENSA_CALL0_ABI__) + abi_entry(12) + + s32i a0, sp, 0 + s32i abi_saved0, sp, 4 + s32i abi_saved1, sp, 8 + mov abi_saved0, a2 +#else +#error Unsupported Xtensa ABI +#endif /* regs->syscall = regs->areg[2] */ - l32i a7, a2, PT_AREG2 - s32i a7, a2, PT_SYSCALL + l32i a7, abi_saved0, PT_AREG2 + s32i a7, abi_saved0, PT_SYSCALL GET_THREAD_INFO(a4, a1) - l32i a3, a4, TI_FLAGS + l32i abi_saved1, a4, TI_FLAGS movi a4, _TIF_WORK_MASK - and a3, a3, a4 - beqz a3, 1f + and abi_saved1, abi_saved1, a4 + beqz abi_saved1, 1f - mov a6, a2 - call4 do_syscall_trace_enter - beqz a6, .Lsyscall_exit - l32i a7, a2, PT_SYSCALL + mov abi_arg0, abi_saved0 + abi_call do_syscall_trace_enter + beqz abi_rv, .Lsyscall_exit + l32i a7, abi_saved0, PT_SYSCALL 1: /* syscall = sys_call_table[syscall_nr] */ movi a4, sys_call_table movi a5, __NR_syscalls - movi a6, -ENOSYS + movi abi_rv, -ENOSYS bgeu a7, a5, 1f addx4 a4, a7, a4 - l32i a4, a4, 0 + l32i abi_tmp0, a4, 0 /* Load args: arg0 - arg5 are passed via regs. 
*/ - l32i a6, a2, PT_AREG6 - l32i a7, a2, PT_AREG3 - l32i a8, a2, PT_AREG4 - l32i a9, a2, PT_AREG5 - l32i a10, a2, PT_AREG8 - l32i a11, a2, PT_AREG9 + l32i abi_arg0, abi_saved0, PT_AREG6 + l32i abi_arg1, abi_saved0, PT_AREG3 + l32i abi_arg2, abi_saved0, PT_AREG4 + l32i abi_arg3, abi_saved0, PT_AREG5 + l32i abi_arg4, abi_saved0, PT_AREG8 + l32i abi_arg5, abi_saved0, PT_AREG9 - callx4 a4 + abi_callx abi_tmp0 1: /* regs->areg[2] = return_value */ - s32i a6, a2, PT_AREG2 - bnez a3, 1f + s32i abi_rv, abi_saved0, PT_AREG2 + bnez abi_saved1, 1f .Lsyscall_exit: +#if defined(__XTENSA_WINDOWED_ABI__) abi_ret_default +#elif defined(__XTENSA_CALL0_ABI__) + l32i a0, sp, 0 + l32i abi_saved0, sp, 4 + l32i abi_saved1, sp, 8 + abi_ret(12) +#else +#error Unsupported Xtensa ABI +#endif 1: - mov a6, a2 - call4 do_syscall_trace_leave - abi_ret_default + mov abi_arg0, abi_saved0 + abi_call do_syscall_trace_leave + j .Lsyscall_exit ENDPROC(system_call) @@ -1988,8 +2033,18 @@ ENDPROC(system_call) ENTRY(_switch_to) +#if defined(__XTENSA_WINDOWED_ABI__) abi_entry(XTENSA_SPILL_STACK_RESERVE) +#elif defined(__XTENSA_CALL0_ABI__) + abi_entry(16) + s32i a12, sp, 0 + s32i a13, sp, 4 + s32i a14, sp, 8 + s32i a15, sp, 12 +#else +#error Unsupported Xtensa ABI +#endif mov a11, a3 # and 'next' (a3) l32i a4, a2, TASK_THREAD_INFO @@ -2033,7 +2088,9 @@ ENTRY(_switch_to) /* Flush register file. */ +#if defined(__XTENSA_WINDOWED_ABI__) spill_registers_kernel +#endif /* Set kernel stack (and leave critical section) * Note: It's save to set it here. The stack will not be overwritten @@ -2055,34 +2112,43 @@ ENTRY(_switch_to) wsr a14, ps rsync +#if defined(__XTENSA_WINDOWED_ABI__) abi_ret(XTENSA_SPILL_STACK_RESERVE) +#elif defined(__XTENSA_CALL0_ABI__) + l32i a12, sp, 0 + l32i a13, sp, 4 + l32i a14, sp, 8 + l32i a15, sp, 12 + abi_ret(16) +#else +#error Unsupported Xtensa ABI +#endif ENDPROC(_switch_to) ENTRY(ret_from_fork) /* void schedule_tail (struct task_struct *prev) - * Note: prev is still in a6 (return value from fake call4 frame) + * Note: prev is still in abi_arg0 (return value from fake call frame) */ - call4 schedule_tail - - mov a6, a1 - call4 do_syscall_trace_leave + abi_call schedule_tail - j common_exception_return + mov abi_arg0, a1 + abi_call do_syscall_trace_leave + j common_exception_return ENDPROC(ret_from_fork) /* * Kernel thread creation helper - * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg - * left from _switch_to: a6 = prev + * On entry, set up by copy_thread: abi_saved0 = thread_fn, + * abi_saved1 = thread_fn arg. Left from _switch_to: abi_arg0 = prev */ ENTRY(ret_from_kernel_thread) - call4 schedule_tail - mov a6, a3 - callx4 a2 - j common_exception_return + abi_call schedule_tail + mov abi_arg0, abi_saved1 + abi_callx abi_saved0 + j common_exception_return ENDPROC(ret_from_kernel_thread) diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index b9b81e76beea..8484294bc623 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -15,6 +15,7 @@ * Kevin Chea */ +#include <asm/asmmacro.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/cacheasm.h> @@ -66,11 +67,13 @@ _SetupOCD: * xt-gdb to single step via DEBUG exceptions received directly * by ocd. 
*/ +#if XCHAL_HAVE_WINDOWED movi a1, 1 movi a0, 0 wsr a1, windowstart wsr a0, windowbase rsync +#endif movi a1, LOCKLEVEL wsr a1, ps @@ -193,9 +196,10 @@ ENTRY(_startup) movi a1, start_info l32i a1, a1, 0 - movi a2, PS_WOE_MASK | LOCKLEVEL - # WOE=1, INTLEVEL=LOCKLEVEL, UM=0 - wsr a2, ps # (enable reg-windows; progmode stack) + /* Disable interrupts. */ + /* Enable window exceptions if kernel is built with windowed ABI. */ + movi a2, KERNEL_PS_WOE_MASK | LOCKLEVEL + wsr a2, ps rsync #ifdef CONFIG_SMP @@ -267,13 +271,13 @@ ENTRY(_startup) l32i a1, a1, 0 #endif - movi a6, 0 - xsr a6, excsave1 + movi abi_arg0, 0 + xsr abi_arg0, excsave1 /* init_arch kick-starts the linux kernel */ - call4 init_arch - call4 start_kernel + abi_call init_arch + abi_call start_kernel should_never_return: j should_never_return @@ -297,10 +301,10 @@ should_never_return: s32i a3, a2, 0 memw - movi a6, 0 - wsr a6, excsave1 + movi abi_arg0, 0 + wsr abi_arg0, excsave1 - call4 secondary_start_kernel + abi_call secondary_start_kernel j should_never_return #endif /* CONFIG_SMP */ diff --git a/arch/xtensa/kernel/mcount.S b/arch/xtensa/kernel/mcount.S index 5e4619f52858..51daaf4e0b82 100644 --- a/arch/xtensa/kernel/mcount.S +++ b/arch/xtensa/kernel/mcount.S @@ -17,11 +17,16 @@ /* * Entry condition: * - * a2: a0 of the caller + * a2: a0 of the caller in windowed ABI + * a10: a0 of the caller in call0 ABI + * + * In call0 ABI the function _mcount is called with the special ABI: + * its argument is in a10 and all the usual argument registers (a2 - a7) + * must be preserved in addition to callee-saved a12 - a15. */ ENTRY(_mcount) - +#if defined(__XTENSA_WINDOWED_ABI__) abi_entry_default movi a4, ftrace_trace_function @@ -42,7 +47,36 @@ ENTRY(_mcount) callx4 a4 abi_ret_default +#elif defined(__XTENSA_CALL0_ABI__) + abi_entry_default + + movi a9, ftrace_trace_function + l32i a9, a9, 0 + movi a11, ftrace_stub + bne a9, a11, 1f + abi_ret_default +1: abi_entry(28) + s32i a0, sp, 0 + s32i a2, sp, 4 + s32i a3, sp, 8 + s32i a4, sp, 12 + s32i a5, sp, 16 + s32i a6, sp, 20 + s32i a7, sp, 24 + addi a2, a10, -MCOUNT_INSN_SIZE + callx0 a9 + l32i a0, sp, 0 + l32i a2, sp, 4 + l32i a3, sp, 8 + l32i a4, sp, 12 + l32i a5, sp, 16 + l32i a6, sp, 20 + l32i a7, sp, 24 + abi_ret(28) +#else +#error Unsupported Xtensa ABI +#endif ENDPROC(_mcount) ENTRY(ftrace_stub) diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 47f933fed870..bd80df890b1e 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -211,11 +211,18 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, struct thread_info *ti; #endif +#if defined(__XTENSA_WINDOWED_ABI__) /* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */ SPILL_SLOT(childregs, 1) = (unsigned long)childregs; SPILL_SLOT(childregs, 0) = 0; p->thread.sp = (unsigned long)childregs; +#elif defined(__XTENSA_CALL0_ABI__) + /* Reserve 16 bytes for the _switch_to stack frame. */ + p->thread.sp = (unsigned long)childregs - 16; +#else +#error Unsupported Xtensa ABI +#endif if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { struct pt_regs *regs = current_pt_regs(); @@ -272,11 +279,25 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, p->thread.ra = MAKE_RA_FOR_CALL( (unsigned long)ret_from_kernel_thread, 1); - /* pass parameters to ret_from_kernel_thread: - * a2 = thread_fn, a3 = thread_fn arg + /* pass parameters to ret_from_kernel_thread: */ +#if defined(__XTENSA_WINDOWED_ABI__) + /* + * a2 = thread_fn, a3 = thread_fn arg. 
+ * Window underflow will load registers from the + * spill slots on the stack on return from _switch_to. */ - SPILL_SLOT(childregs, 3) = thread_fn_arg; SPILL_SLOT(childregs, 2) = usp_thread_fn; + SPILL_SLOT(childregs, 3) = thread_fn_arg; +#elif defined(__XTENSA_CALL0_ABI__) + /* + * a12 = thread_fn, a13 = thread_fn arg. + * _switch_to epilogue will load registers from the stack. + */ + ((unsigned long *)p->thread.sp)[0] = usp_thread_fn; + ((unsigned long *)p->thread.sp)[1] = thread_fn_arg; +#else +#error Unsupported Xtensa ABI +#endif /* Childregs are only used when we're going to userspace * in which case start_thread will set them up. diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index ee9082a142fe..8db20cfb44ab 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -37,14 +37,15 @@ #include <asm/bootparam.h> #include <asm/kasan.h> #include <asm/mmu_context.h> -#include <asm/processor.h> -#include <asm/timex.h> -#include <asm/platform.h> #include <asm/page.h> -#include <asm/setup.h> #include <asm/param.h> +#include <asm/platform.h> +#include <asm/processor.h> +#include <asm/sections.h> +#include <asm/setup.h> #include <asm/smp.h> #include <asm/sysmem.h> +#include <asm/timex.h> #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) struct screen_info screen_info = { @@ -271,49 +272,6 @@ void __init init_arch(bp_tag_t *bp_start) * Initialize system. Setup memory and reserve regions. */ -extern char _end[]; -extern char _stext[]; -extern char _WindowVectors_text_start; -extern char _WindowVectors_text_end; -extern char _DebugInterruptVector_text_start; -extern char _DebugInterruptVector_text_end; -extern char _KernelExceptionVector_text_start; -extern char _KernelExceptionVector_text_end; -extern char _UserExceptionVector_text_start; -extern char _UserExceptionVector_text_end; -extern char _DoubleExceptionVector_text_start; -extern char _DoubleExceptionVector_text_end; -extern char _exception_text_start; -extern char _exception_text_end; -#if XCHAL_EXCM_LEVEL >= 2 -extern char _Level2InterruptVector_text_start; -extern char _Level2InterruptVector_text_end; -#endif -#if XCHAL_EXCM_LEVEL >= 3 -extern char _Level3InterruptVector_text_start; -extern char _Level3InterruptVector_text_end; -#endif -#if XCHAL_EXCM_LEVEL >= 4 -extern char _Level4InterruptVector_text_start; -extern char _Level4InterruptVector_text_end; -#endif -#if XCHAL_EXCM_LEVEL >= 5 -extern char _Level5InterruptVector_text_start; -extern char _Level5InterruptVector_text_end; -#endif -#if XCHAL_EXCM_LEVEL >= 6 -extern char _Level6InterruptVector_text_start; -extern char _Level6InterruptVector_text_end; -#endif -#ifdef CONFIG_SMP -extern char _SecondaryResetVector_text_start; -extern char _SecondaryResetVector_text_end; -#endif -#ifdef CONFIG_XIP_KERNEL -extern char _xip_start[]; -extern char _xip_end[]; -#endif - static inline int __init_memblock mem_reserve(unsigned long start, unsigned long end) { @@ -349,49 +307,51 @@ void __init setup_arch(char **cmdline_p) #endif #ifdef CONFIG_VECTORS_ADDR - mem_reserve(__pa(&_WindowVectors_text_start), - __pa(&_WindowVectors_text_end)); +#ifdef SUPPORT_WINDOWED + mem_reserve(__pa(_WindowVectors_text_start), + __pa(_WindowVectors_text_end)); +#endif - mem_reserve(__pa(&_DebugInterruptVector_text_start), - __pa(&_DebugInterruptVector_text_end)); + mem_reserve(__pa(_DebugInterruptVector_text_start), + __pa(_DebugInterruptVector_text_end)); - mem_reserve(__pa(&_KernelExceptionVector_text_start), - __pa(&_KernelExceptionVector_text_end)); + 
mem_reserve(__pa(_KernelExceptionVector_text_start), + __pa(_KernelExceptionVector_text_end)); - mem_reserve(__pa(&_UserExceptionVector_text_start), - __pa(&_UserExceptionVector_text_end)); + mem_reserve(__pa(_UserExceptionVector_text_start), + __pa(_UserExceptionVector_text_end)); - mem_reserve(__pa(&_DoubleExceptionVector_text_start), - __pa(&_DoubleExceptionVector_text_end)); + mem_reserve(__pa(_DoubleExceptionVector_text_start), + __pa(_DoubleExceptionVector_text_end)); - mem_reserve(__pa(&_exception_text_start), - __pa(&_exception_text_end)); + mem_reserve(__pa(_exception_text_start), + __pa(_exception_text_end)); #if XCHAL_EXCM_LEVEL >= 2 - mem_reserve(__pa(&_Level2InterruptVector_text_start), - __pa(&_Level2InterruptVector_text_end)); + mem_reserve(__pa(_Level2InterruptVector_text_start), + __pa(_Level2InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 3 - mem_reserve(__pa(&_Level3InterruptVector_text_start), - __pa(&_Level3InterruptVector_text_end)); + mem_reserve(__pa(_Level3InterruptVector_text_start), + __pa(_Level3InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 4 - mem_reserve(__pa(&_Level4InterruptVector_text_start), - __pa(&_Level4InterruptVector_text_end)); + mem_reserve(__pa(_Level4InterruptVector_text_start), + __pa(_Level4InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 5 - mem_reserve(__pa(&_Level5InterruptVector_text_start), - __pa(&_Level5InterruptVector_text_end)); + mem_reserve(__pa(_Level5InterruptVector_text_start), + __pa(_Level5InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 6 - mem_reserve(__pa(&_Level6InterruptVector_text_start), - __pa(&_Level6InterruptVector_text_end)); + mem_reserve(__pa(_Level6InterruptVector_text_start), + __pa(_Level6InterruptVector_text_end)); #endif #endif /* CONFIG_VECTORS_ADDR */ #ifdef CONFIG_SMP - mem_reserve(__pa(&_SecondaryResetVector_text_start), - __pa(&_SecondaryResetVector_text_end)); + mem_reserve(__pa(_SecondaryResetVector_text_start), + __pa(_SecondaryResetVector_text_end)); #endif parse_early_param(); bootmem_init(); diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index c4d77dbfb61a..f6c949895b3e 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -45,12 +45,13 @@ struct rt_sigframe unsigned int window[4]; }; -/* +#if defined(USER_SUPPORT_WINDOWED) +/* * Flush register windows stored in pt_regs to stack. * Returns 1 for errors. */ -int +static int flush_window_regs_user(struct pt_regs *regs) { const unsigned long ws = regs->windowstart; @@ -121,6 +122,13 @@ flush_window_regs_user(struct pt_regs *regs) errout: return err; } +#else +static int +flush_window_regs_user(struct pt_regs *regs) +{ + return 0; +} +#endif /* * Note: We don't copy double exception 'regs', we have to finish double exc. 
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 874b6efc6fb3..4b4dbeb2d612 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -97,7 +97,9 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = { /* EXCCAUSE_INSTRUCTION_FETCH unhandled */ /* EXCCAUSE_LOAD_STORE_ERROR unhandled*/ { EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt }, +#ifdef SUPPORT_WINDOWED { EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca }, +#endif /* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */ /* EXCCAUSE_PRIVILEGED unhandled */ #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION @@ -462,12 +464,10 @@ void secondary_trap_init(void) void show_regs(struct pt_regs * regs) { - int i, wmask; + int i; show_regs_print_info(KERN_DEFAULT); - wmask = regs->wmask & ~1; - for (i = 0; i < 16; i++) { if ((i % 8) == 0) pr_info("a%02d:", i); @@ -527,7 +527,7 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) DEFINE_SPINLOCK(die_lock); -void die(const char * str, struct pt_regs * regs, long err) +void __noreturn die(const char * str, struct pt_regs * regs, long err) { static int die_counter; const char *pr = ""; diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index 1a7538ccfc5a..407ece204e7c 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S @@ -226,6 +226,7 @@ ENTRY(_DoubleExceptionVector) xsr a0, depc # get DEPC, save a0 +#ifdef SUPPORT_WINDOWED movi a2, WINDOW_VECTORS_VADDR _bltu a0, a2, .Lfixup addi a2, a2, WINDOW_VECTORS_SIZE @@ -275,6 +276,10 @@ _DoubleExceptionVector_WindowUnderflow: l32i a0, a0, EXC_TABLE_FAST_USER jx a0 +#else + j .Lfixup +#endif + /* * We only allow the ITLB miss exception if we are in kernel space. * All other exceptions are unexpected and thus unrecoverable! @@ -343,6 +348,7 @@ _DoubleExceptionVector_WindowUnderflow: l32i a0, a0, EXC_TABLE_FAST_USER jx a0 +#ifdef SUPPORT_WINDOWED /* * Restart window OVERFLOW exception. * Currently: @@ -475,9 +481,12 @@ _DoubleExceptionVector_handle_exception: rsr a0, depc rotw -3 j 1b +#endif ENDPROC(_DoubleExceptionVector) +#ifdef SUPPORT_WINDOWED + /* * Fixup handler for TLB miss in double exception handler for window owerflow. * We get here with windowbase set to the window that was being spilled and @@ -590,6 +599,8 @@ ENTRY(window_overflow_restore_a0_fixup) ENDPROC(window_overflow_restore_a0_fixup) +#endif + /* * Debug interrupt vector * @@ -650,6 +661,25 @@ ENTRY(_Level\level\()InterruptVector) irq_entry_level 5 irq_entry_level 6 +#if XCHAL_EXCM_LEVEL >= 2 + /* + * Continuation of medium priority interrupt dispatch code. + * On entry here, a0 contains PS, and EPC2 contains saved a0: + */ + __XTENSA_HANDLER + .align 4 +_SimulateUserKernelVectorException: + addi a0, a0, (1 << PS_EXCM_BIT) +#if !XTENSA_FAKE_NMI + wsr a0, ps +#endif + bbsi.l a0, PS_UM_BIT, 1f # branch if user mode + xsr a0, excsave2 # restore a0 + j _KernelExceptionVector # simulate kernel vector exception +1: xsr a0, excsave2 # restore a0 + j _UserExceptionVector # simulate user vector exception +#endif + /* Window overflow and underflow handlers. 
* The handlers must be 64 bytes apart, first starting with the underflow @@ -668,6 +698,8 @@ ENTRY(_Level\level\()InterruptVector) .section .WindowVectors.text, "ax" +#ifdef SUPPORT_WINDOWED + /* 4-Register Window Overflow Vector (Handler) */ ENTRY_ALIGN64(_WindowOverflow4) @@ -680,27 +712,6 @@ ENTRY_ALIGN64(_WindowOverflow4) ENDPROC(_WindowOverflow4) - -#if XCHAL_EXCM_LEVEL >= 2 - /* Not a window vector - but a convenient location - * (where we know there's space) for continuation of - * medium priority interrupt dispatch code. - * On entry here, a0 contains PS, and EPC2 contains saved a0: - */ - .align 4 -_SimulateUserKernelVectorException: - addi a0, a0, (1 << PS_EXCM_BIT) -#if !XTENSA_FAKE_NMI - wsr a0, ps -#endif - bbsi.l a0, PS_UM_BIT, 1f # branch if user mode - xsr a0, excsave2 # restore a0 - j _KernelExceptionVector # simulate kernel vector exception -1: xsr a0, excsave2 # restore a0 - j _UserExceptionVector # simulate user vector exception -#endif - - /* 4-Register Window Underflow Vector (Handler) */ ENTRY_ALIGN64(_WindowUnderflow4) @@ -789,4 +800,6 @@ ENTRY_ALIGN64(_WindowUnderflow12) ENDPROC(_WindowUnderflow12) +#endif + .text diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index d23a6e38f062..eee270a039a4 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -94,7 +94,9 @@ SECTIONS . = ALIGN(PAGE_SIZE); _vecbase = .; +#ifdef SUPPORT_WINDOWED SECTION_VECTOR2 (.WindowVectors.text, WINDOW_VECTORS_VADDR) +#endif #if XCHAL_EXCM_LEVEL >= 2 SECTION_VECTOR2 (.Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR) #endif @@ -166,8 +168,10 @@ SECTIONS __boot_reloc_table_start = ABSOLUTE(.); #if !MERGED_VECTORS +#ifdef SUPPORT_WINDOWED RELOCATE_ENTRY(_WindowVectors_text, .WindowVectors.text); +#endif #if XCHAL_EXCM_LEVEL >= 2 RELOCATE_ENTRY(_Level2InterruptVector_text, .Level2InterruptVector.text); @@ -229,14 +233,18 @@ SECTIONS #if !MERGED_VECTORS /* The vectors are relocated to the real position at startup time */ +#ifdef SUPPORT_WINDOWED SECTION_VECTOR4 (_WindowVectors_text, .WindowVectors.text, WINDOW_VECTORS_VADDR, - .dummy) + LAST) +#undef LAST +#define LAST .WindowVectors.text +#endif SECTION_VECTOR4 (_DebugInterruptVector_text, .DebugInterruptVector.text, DEBUG_VECTOR_VADDR, - .WindowVectors.text) + LAST) #undef LAST #define LAST .DebugInterruptVector.text #if XCHAL_EXCM_LEVEL >= 2 diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S index 4faf46fe3f38..0731912227d3 100644 --- a/arch/xtensa/lib/strncpy_user.S +++ b/arch/xtensa/lib/strncpy_user.S @@ -45,7 +45,6 @@ # a9/ tmp # a10/ tmp # a11/ dst -# a12/ tmp .text ENTRY(__strncpy_user) @@ -61,7 +60,7 @@ ENTRY(__strncpy_user) bbsi.l a3, 0, .Lsrc1mod2 # if only 8-bit aligned bbsi.l a3, 1, .Lsrc2mod4 # if only 16-bit aligned .Lsrcaligned: # return here when src is word-aligned - srli a12, a4, 2 # number of loop iterations with 4B per loop + srli a10, a4, 2 # number of loop iterations with 4B per loop movi a9, 3 bnone a11, a9, .Laligned j .Ldstunaligned @@ -102,11 +101,11 @@ EX(10f) s8i a9, a11, 0 # store byte 0 .byte 0 # (0 mod 4 alignment for LBEG) .Laligned: #if XCHAL_HAVE_LOOPS - loopnez a12, .Loop1done + loopnez a10, .Loop1done #else - beqz a12, .Loop1done - slli a12, a12, 2 - add a12, a12, a11 # a12 = end of last 4B chunck + beqz a10, .Loop1done + slli a10, a10, 2 + add a10, a10, a11 # a10 = end of last 4B chunck #endif .Loop1: EX(11f) l32i a9, a3, 0 # get word from src @@ -118,7 +117,7 @@ EX(10f) s32i a9, a11, 0 # store word to dst bnone a9, 
a8, .Lz3 # if byte 3 is zero addi a11, a11, 4 # advance dst pointer #if !XCHAL_HAVE_LOOPS - blt a11, a12, .Loop1 + blt a11, a10, .Loop1 #endif .Loop1done: @@ -185,7 +184,7 @@ EX(10f) s8i a9, a11, 2 loopnez a4, .Lunalignedend #else beqz a4, .Lunalignedend - add a12, a11, a4 # a12 = ending address + add a10, a11, a4 # a10 = ending address #endif /* XCHAL_HAVE_LOOPS */ .Lnextbyte: EX(11f) l8ui a9, a3, 0 @@ -194,7 +193,7 @@ EX(10f) s8i a9, a11, 0 beqz a9, .Lunalignedend addi a11, a11, 1 #if !XCHAL_HAVE_LOOPS - blt a11, a12, .Lnextbyte + blt a11, a10, .Lnextbyte #endif .Lunalignedend: diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S index a0aa4047f94a..16128c094c62 100644 --- a/arch/xtensa/lib/usercopy.S +++ b/arch/xtensa/lib/usercopy.S @@ -60,7 +60,12 @@ .text ENTRY(__xtensa_copy_user) - abi_entry_default +#if !XCHAL_HAVE_LOOPS && defined(__XTENSA_CALL0_ABI__) +#define STACK_SIZE 4 +#else +#define STACK_SIZE 0 +#endif + abi_entry(STACK_SIZE) # a2/ dst, a3/ src, a4/ len mov a5, a2 # copy dst so that a2 is return value mov a11, a4 # preserve original len for error case @@ -75,7 +80,7 @@ ENTRY(__xtensa_copy_user) __ssa8 a3 # set shift amount from byte offset bnez a4, .Lsrcunaligned movi a2, 0 # return success for len==0 - abi_ret_default + abi_ret(STACK_SIZE) /* * Destination is unaligned @@ -127,7 +132,7 @@ EX(10f) s8i a6, a5, 0 #endif /* !XCHAL_HAVE_LOOPS */ .Lbytecopydone: movi a2, 0 # return success for len bytes copied - abi_ret_default + abi_ret(STACK_SIZE) /* * Destination and source are word-aligned. @@ -187,7 +192,7 @@ EX(10f) l8ui a6, a3, 0 EX(10f) s8i a6, a5, 0 .L5: movi a2, 0 # return success for len bytes copied - abi_ret_default + abi_ret(STACK_SIZE) /* * Destination is aligned, Source is unaligned @@ -205,8 +210,14 @@ EX(10f) l32i a6, a3, 0 # load first word loopnez a7, .Loop2done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .Loop2done +#if defined(__XTENSA_CALL0_ABI__) + s32i a10, a1, 0 + slli a10, a7, 4 + add a10, a10, a3 # a10 = end of last 16B source chunk +#else slli a12, a7, 4 add a12, a12, a3 # a12 = end of last 16B source chunk +#endif #endif /* !XCHAL_HAVE_LOOPS */ .Loop2: EX(10f) l32i a7, a3, 4 @@ -224,7 +235,12 @@ EX(10f) s32i a8, a5, 8 EX(10f) s32i a9, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS +#if defined(__XTENSA_CALL0_ABI__) + blt a3, a10, .Loop2 + l32i a10, a1, 0 +#else blt a3, a12, .Loop2 +#endif #endif /* !XCHAL_HAVE_LOOPS */ .Loop2done: bbci.l a4, 3, .L12 @@ -264,7 +280,7 @@ EX(10f) l8ui a6, a3, 0 EX(10f) s8i a6, a5, 0 .L15: movi a2, 0 # return success for len bytes copied - abi_ret_default + abi_ret(STACK_SIZE) ENDPROC(__xtensa_copy_user) @@ -281,4 +297,4 @@ ENDPROC(__xtensa_copy_user) 10: sub a2, a5, a2 /* a2 <-- bytes copied */ sub a2, a11, a2 /* a2 <-- bytes not copied */ - abi_ret_default + abi_ret(STACK_SIZE) diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index 95a74890c7e9..fd6a70635962 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -238,7 +238,7 @@ bad_page_fault: void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) { - extern void die(const char*, struct pt_regs*, long); + extern void __noreturn die(const char*, struct pt_regs*, long); const struct exception_table_entry *entry; /* Are we prepared to handle this kernel fault? */ @@ -257,5 +257,4 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) "address %08lx\n pc = %08lx, ra = %08lx\n", address, regs->pc, regs->areg[0]); die("Oops", regs, sig); - do_exit(sig); } |
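
In the mcount.S hunk above, both ABI variants keep the same fast-exit test: ftrace_trace_function is loaded, compared against ftrace_stub, and _mcount returns immediately while no tracer is registered; the call0 variant additionally spills a0 and a2..a7 around the indirect call because its callers expect those argument registers to survive, and it passes the traced address minus MCOUNT_INSN_SIZE in a2. The fragment below is a rough C model of that fast-exit test only; the two-argument ftrace_func_t and the mcount_model() wrapper are illustrative simplifications, not the kernel's actual ftrace interfaces.

    /* Simplified model of the _mcount fast-exit test; not the kernel's
     * real ftrace types.  The assembly additionally subtracts
     * MCOUNT_INSN_SIZE from the traced address before calling the tracer. */
    typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

    static void ftrace_stub(unsigned long ip, unsigned long parent_ip)
    {
            (void)ip; (void)parent_ip;      /* tracing disabled: do nothing */
    }

    static ftrace_func_t ftrace_trace_function = ftrace_stub;

    static void mcount_model(unsigned long ip, unsigned long parent_ip)
    {
            ftrace_func_t fn = ftrace_trace_function;

            if (fn == ftrace_stub)
                    return;                 /* common case: return immediately */
            fn(ip, parent_ip);              /* call the registered tracer */
    }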
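
The copy_thread() hunks split the kernel-thread argument handoff by ABI: with the windowed ABI thread_fn and its argument go into the spill slots associated with childregs, so the window underflow on return from _switch_to materialises them as a2/a3, while with the call0 ABI a 16-byte _switch_to frame is reserved below childregs and its first two words are reloaded into a12/a13 by the _switch_to epilogue. The snippet below is a toy userspace model of the two stack layouts, assuming 32-bit-style word addressing; the MODEL_WINDOWED_ABI macro, the stack array and the placeholder values are inventions for illustration, not kernel code.

    #include <stdint.h>
    #include <stdio.h>

    #define SWITCH_FRAME_WORDS 4            /* 16 bytes reserved for _switch_to */

    static uintptr_t stack[64];             /* toy stand-in for the kernel stack */

    int main(void)
    {
            uintptr_t *childregs = &stack[48];      /* pretend pt_regs sit here */
            uintptr_t fn = 0x1000, arg = 0x2000;    /* placeholder values */
            uintptr_t sp;

    #ifdef MODEL_WINDOWED_ABI
            /* Windowed ABI: the 16 bytes just below the stack pointer form the
             * a0..a3 spill area; slots 2 and 3 become a2/a3 in the child after
             * the window underflow on return from _switch_to. */
            childregs[-4 + 2] = fn;
            childregs[-4 + 3] = arg;
            sp = (uintptr_t)childregs;
    #else
            /* Call0 ABI: reserve a 16-byte _switch_to frame below childregs;
             * its first two words are reloaded into a12/a13 by the epilogue. */
            uintptr_t *frame = childregs - SWITCH_FRAME_WORDS;
            frame[0] = fn;
            frame[1] = arg;
            sp = (uintptr_t)frame;
    #endif
            printf("thread.sp = %p\n", (void *)sp);
            return 0;
    }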
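
The setup.c hunk drops the long block of local extern char declarations in favour of <asm/sections.h> and removes the '&' from the __pa() arguments. For symbols declared with array type the two spellings denote the same address, so the change is purely cosmetic; the small standalone check below uses an arbitrary example symbol (not one of the vector symbols above) to show the equivalence.

    #include <stdio.h>

    char _example_start[16];        /* stands in for a linker-provided symbol */

    int main(void)
    {
            /* With an array declaration, sym and &sym evaluate to the same
             * address, so __pa(sym) and __pa(&sym) compute the same value. */
            printf("%d\n", (void *)_example_start == (void *)&_example_start);
            return 0;
    }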
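
The signal.c hunk makes flush_window_regs_user() static and supplies a trivial stub when USER_SUPPORT_WINDOWED is not defined, so its callers keep calling it unconditionally and need no #ifdefs of their own. The fragment below sketches that general compile-out pattern with invented names (HAVE_FEATURE, struct state, flush_feature_state, setup_frame); it is not the xtensa code itself.

    /* Generic compile-out pattern with invented names; HAVE_FEATURE stands
     * in for a configuration macro such as USER_SUPPORT_WINDOWED. */
    struct state { int dirty; };

    #ifdef HAVE_FEATURE
    static int flush_feature_state(struct state *s)
    {
            s->dirty = 0;           /* the real work would happen here */
            return 0;
    }
    #else
    static int flush_feature_state(struct state *s)
    {
            (void)s;
            return 0;               /* nothing to do when the feature is absent */
    }
    #endif

    int setup_frame(struct state *s)
    {
            return flush_feature_state(s);  /* caller stays free of #ifdefs */
    }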
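
In traps.c the EXCCAUSE_ALLOCA entry of the exception dispatch table is registered only under SUPPORT_WINDOWED, since fast_alloca has no meaning without register windows. The sketch below shows the same conditional-table-entry idiom; the struct layout, cause numbers and handler names are invented for the example and do not mirror the kernel's dispatch_init_table_t.

    /* Conditionally registered dispatch-table entry (illustrative only). */
    struct handler_entry {
            int cause;
            void (*handler)(void);
    };

    static void do_interrupt(void) { }
    #ifdef SUPPORT_WINDOWED
    static void fast_alloca(void) { }
    #endif

    static const struct handler_entry dispatch_table[] = {
            { 1, do_interrupt },
    #ifdef SUPPORT_WINDOWED
            { 5, fast_alloca },     /* only meaningful with register windows */
    #endif
    };

    int num_handlers(void)
    {
            return (int)(sizeof(dispatch_table) / sizeof(dispatch_table[0]));
    }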
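
die() is annotated __noreturn both at its definition in traps.c and at the local declaration in mm/fault.c, which is what lets the now unreachable do_exit(sig) at the end of bad_page_fault() be deleted. The standalone example below demonstrates the idea with C11's noreturn instead of the kernel's __noreturn macro; the function names and the exit(1) fallback are illustrative only.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdnoreturn.h>

    static noreturn void die(const char *str, long err)
    {
            fprintf(stderr, "Oops: %s (%ld)\n", str, err);
            exit(1);                        /* control never comes back */
    }

    static void bad_fault_model(long sig)
    {
            die("unhandled kernel fault", sig);
            /* no exit(sig) needed here: the compiler knows die() cannot return */
    }

    int main(void)
    {
            bad_fault_model(11);
    }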