author		Linus Torvalds <torvalds@linux-foundation.org>	2015-11-13 02:33:11 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-13 02:33:11 +0300
commit		a18e2fa5e670a1b84e66522b221c42875b02028a (patch)
tree		30d7724f3f8e82c9408e8bd32b141f114d46b1c9 /arch/arm64
parent		7dac7102afbeb99daa454f555f1ea1f42fad2f78 (diff)
parent		01b305a234943c25c336a6f2f77932a4eaf125fa (diff)
download	linux-a18e2fa5e670a1b84e66522b221c42875b02028a.tar.xz
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes and clean-ups from Catalin Marinas:
"Here's a second pull request for this merging window with some
fixes/clean-ups:
- __cmpxchg_double*() return type fix to avoid truncating a long to
int, which let the logical "not" in cmpxchg_double() misinterpret the
operation's success/failure (see the sketch after this message)
- BPF fixes for mod and div by zero (a C rendering of the emitted
guard follows the diff below)
- Fix compilation with STRICT_MM_TYPECHECKS enabled (the type issue
is sketched after the diff as well)
- VDSO build fix without libgcov
- Some static and __maybe_unused annotations
- Kconfig clean-up (FRAME_POINTER)
- defconfig update for CRYPTO_CRC32_ARM64"
* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: suspend: make hw_breakpoint_restore static
arm64: mmu: make split_pud and fixup_executable static
arm64: smp: make of_parse_and_init_cpus static
arm64: use linux/types.h in kvm.h
arm64: build vdso without libgcov
arm64: mark cpus_have_hwcap as __maybe_unused
arm64: remove redundant FRAME_POINTER kconfig option and force to select it
arm64: fix R/O permissions of FDT mapping
arm64: fix STRICT_MM_TYPECHECKS issue in PTE_CONT manipulation
arm64: bpf: fix mod-by-zero case
arm64: bpf: fix div-by-zero case
arm64: Enable CRYPTO_CRC32_ARM64 in defconfig
arm64: cmpxchg_dbl: fix return value type
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/Kconfig			1
-rw-r--r--	arch/arm64/Kconfig.debug		4
-rw-r--r--	arch/arm64/configs/defconfig		1
-rw-r--r--	arch/arm64/include/asm/atomic_ll_sc.h	2
-rw-r--r--	arch/arm64/include/asm/atomic_lse.h	2
-rw-r--r--	arch/arm64/include/asm/pgtable.h	1
-rw-r--r--	arch/arm64/include/uapi/asm/kvm.h	2
-rw-r--r--	arch/arm64/kernel/cpufeature.c		2
-rw-r--r--	arch/arm64/kernel/smp.c			2
-rw-r--r--	arch/arm64/kernel/suspend.c		2
-rw-r--r--	arch/arm64/kernel/vdso/Makefile		3
-rw-r--r--	arch/arm64/mm/mmu.c			8
-rw-r--r--	arch/arm64/net/bpf_jit.h		3
-rw-r--r--	arch/arm64/net/bpf_jit_comp.c		54
14 files changed, 54 insertions(+), 33 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 851fe11c6069..9ac16a482ff1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -27,6 +27,7 @@ config ARM64
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS
 	select EDAC_SUPPORT
+	select FRAME_POINTER
 	select GENERIC_ALLOCATOR
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CLOCKEVENTS_BROADCAST
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index c24d6adc0420..04fb73b973f1 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -2,10 +2,6 @@ menu "Kernel hacking"

 source "lib/Kconfig.debug"

-config FRAME_POINTER
-	bool
-	default y
-
 config ARM64_PTDUMP
 	bool "Export kernel pagetable layout to userspace via debugfs"
 	depends on DEBUG_KERNEL
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 2f71f9cdd39c..bdd7aa358d2a 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -224,3 +224,4 @@ CONFIG_CRYPTO_GHASH_ARM64_CE=y
 CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
 CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
 CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index 74d0b8eb0799..f61c84f6ba02 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -233,7 +233,7 @@ __CMPXCHG_CASE( ,  , mb_8, dmb ish,  , l, "memory")

 #undef __CMPXCHG_CASE

 #define __CMPXCHG_DBL(name, mb, rel, cl)				\
-__LL_SC_INLINE int							\
+__LL_SC_INLINE long							\
 __LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
 				      unsigned long old2,		\
 				      unsigned long new1,		\
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 1fce7908e690..197e06afbf71 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -387,7 +387,7 @@ __CMPXCHG_CASE(x,  , mb_8, al, "memory")

 #define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)

 #define __CMPXCHG_DBL(name, mb, cl...)					\
-static inline int __cmpxchg_double##name(unsigned long old1,		\
+static inline long __cmpxchg_double##name(unsigned long old1,		\
 					 unsigned long old2,		\
 					 unsigned long new1,		\
 					 unsigned long new2,		\
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f3acf421ded4..9819a9426b69 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -80,6 +80,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 0cd7b5947dfc..2d4ca4bb0dd3 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -32,7 +32,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/psci.h>
-#include <asm/types.h>
+#include <linux/types.h>
 #include <asm/ptrace.h>

 #define __KVM_HAVE_GUEST_DEBUG

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 52f0d7a5a1c2..c8cf89223b5a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -696,7 +696,7 @@ static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
 }

 /* Check if we have a particular HWCAP enabled */
-static bool cpus_have_hwcap(const struct arm64_cpu_capabilities *cap)
+static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *cap)
 {
 	bool rc;

diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 2bbdc0e4fd14..b1adc51b2c2e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -473,7 +473,7 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
  * cpu logical map array containing MPIDR values related to logical
  * cpus. Assumes that cpu_logical_map(0) has already been initialized.
  */
-void __init of_parse_and_init_cpus(void)
+static void __init of_parse_and_init_cpus(void)
 {
 	struct device_node *dn = NULL;

diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 40f7b33a22da..fce95e17cf7f 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -41,7 +41,7 @@ void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
  * time the notifier runs debug exceptions might have been enabled already,
  * with HW breakpoints registers content still in an unknown state.
  */
-void (*hw_breakpoint_restore)(void *);
+static void (*hw_breakpoint_restore)(void *);
 void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
 {
 	/* Prevent multiple restore hook initializations */
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index f6fe17d88da5..b467fd0a384b 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -15,6 +15,9 @@ ccflags-y := -shared -fno-common -fno-builtin
 ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
 		$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)

+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+
 # Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
 # down to collect2, resulting in silent corruption of the vDSO image.
 ccflags-y += -Wl,-shared
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c2fa6b56613c..e3f563c81c48 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -146,7 +146,7 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 		if (((addr | next | phys) & ~CONT_MASK) == 0) {
 			/* a block of CONT_PTES */
 			__populate_init_pte(pte, addr, next, phys,
-					    prot | __pgprot(PTE_CONT));
+					    __pgprot(pgprot_val(prot) | PTE_CONT));
 		} else {
 			/*
 			 * If the range being split is already inside of a
@@ -165,7 +165,7 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (addr != end);
 }

-void split_pud(pud_t *old_pud, pmd_t *pmd)
+static void split_pud(pud_t *old_pud, pmd_t *pmd)
 {
 	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
 	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
@@ -447,7 +447,7 @@ static void __init map_mem(void)
 	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 }

-void __init fixup_executable(void)
+static void __init fixup_executable(void)
 {
 #ifdef CONFIG_DEBUG_RODATA
 	/* now that we are actually fully mapped, make the start/end more fine grained */
@@ -691,7 +691,7 @@ void __set_fixmap(enum fixed_addresses idx,
 void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
 {
 	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
-	pgprot_t prot = PAGE_KERNEL | PTE_RDONLY;
+	pgprot_t prot = PAGE_KERNEL_RO;
 	int size, offset;
 	void *dt_virt;

diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 98a26ce82d26..aee5637ea436 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -1,7 +1,7 @@
 /*
  * BPF JIT compiler for ARM64
  *
- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -35,6 +35,7 @@
 	aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
 		AARCH64_INSN_BRANCH_COMP_##type)
 #define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
+#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)

 /* Conditional branch (immediate) */
 #define A64_COND_BRANCH(cond, offset)	\
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index a44e5293c6f5..cf3c7d4a1b58 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1,7 +1,7 @@
 /*
  * BPF JIT compiler for ARM64
  *
- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -225,6 +225,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	u8 jmp_cond;
 	s32 jmp_offset;

+#define check_imm(bits, imm) do {				\
+	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
+	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
+		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
+			i, imm, imm);				\
+		return -EINVAL;					\
+	}							\
+} while (0)
+#define check_imm19(imm) check_imm(19, imm)
+#define check_imm26(imm) check_imm(26, imm)
+
 	switch (code) {
 	/* dst = src */
 	case BPF_ALU | BPF_MOV | BPF_X:
@@ -258,15 +269,33 @@
 		break;
 	case BPF_ALU | BPF_DIV | BPF_X:
 	case BPF_ALU64 | BPF_DIV | BPF_X:
-		emit(A64_UDIV(is64, dst, dst, src), ctx);
-		break;
 	case BPF_ALU | BPF_MOD | BPF_X:
 	case BPF_ALU64 | BPF_MOD | BPF_X:
-		ctx->tmp_used = 1;
-		emit(A64_UDIV(is64, tmp, dst, src), ctx);
-		emit(A64_MUL(is64, tmp, tmp, src), ctx);
-		emit(A64_SUB(is64, dst, dst, tmp), ctx);
+	{
+		const u8 r0 = bpf2a64[BPF_REG_0];
+
+		/* if (src == 0) return 0 */
+		jmp_offset = 3; /* skip ahead to else path */
+		check_imm19(jmp_offset);
+		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
+		emit(A64_MOVZ(1, r0, 0, 0), ctx);
+		jmp_offset = epilogue_offset(ctx);
+		check_imm26(jmp_offset);
+		emit(A64_B(jmp_offset), ctx);
+		/* else */
+		switch (BPF_OP(code)) {
+		case BPF_DIV:
+			emit(A64_UDIV(is64, dst, dst, src), ctx);
+			break;
+		case BPF_MOD:
+			ctx->tmp_used = 1;
+			emit(A64_UDIV(is64, tmp, dst, src), ctx);
+			emit(A64_MUL(is64, tmp, tmp, src), ctx);
+			emit(A64_SUB(is64, dst, dst, tmp), ctx);
+			break;
+		}
 		break;
+	}
 	case BPF_ALU | BPF_LSH | BPF_X:
 	case BPF_ALU64 | BPF_LSH | BPF_X:
 		emit(A64_LSLV(is64, dst, dst, src), ctx);
@@ -393,17 +422,6 @@ emit_bswap_uxt:
 		emit(A64_ASR(is64, dst, dst, imm), ctx);
 		break;

-#define check_imm(bits, imm) do {				\
-	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
-	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
-		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
-			i, imm, imm);				\
-		return -EINVAL;					\
-	}							\
-} while (0)
-#define check_imm19(imm) check_imm(19, imm)
-#define check_imm26(imm) check_imm(26, imm)
-
 	/* JUMP off */
 	case BPF_JMP | BPF_JA:
 		jmp_offset = bpf2a64_offset(i + off, i, ctx);
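The BPF div/mod fixes can be read directly off the diff above: before the UDIV, the JIT now emits A64_CBNZ to test the divisor, and on zero it clears r0 and branches to the epilogue, so the program exits with 0 instead of executing the division (matching the interpreter's divide-by-zero behaviour). Here is a C rendering of that emitted control flow; this is a sketch of the semantics, not kernel source, and run_div_or_mod is a name invented for illustration.

#include <stdio.h>

/* One JITed div/mod instruction, rendered as C. Returns non-zero when
 * the program exits early (the A64_B to the epilogue). */
static int run_div_or_mod(unsigned long *dst, unsigned long src,
			  unsigned long *r0, int is_mod)
{
	if (src == 0) {			/* A64_CBNZ falls through here */
		*r0 = 0;		/* A64_MOVZ(1, r0, 0, 0) */
		return 1;		/* A64_B(epilogue_offset) */
	}
	if (is_mod)
		*dst = *dst % src;	/* UDIV + MUL + SUB sequence */
	else
		*dst = *dst / src;	/* A64_UDIV */
	return 0;			/* fall through to next insn */
}

int main(void)
{
	unsigned long dst = 42, r0 = ~0UL;

	if (run_div_or_mod(&dst, 0, &r0, 0))
		printf("div by zero: program returns %lu\n", r0); /* 0 */
	return 0;
}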
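The STRICT_MM_TYPECHECKS hunk in mm/mmu.c is a pure type issue: with that option enabled, pgprot_t is a single-member struct rather than a bare integer, so prot | __pgprot(PTE_CONT) does not compile and the value has to be unwrapped with pgprot_val() first. A trimmed sketch of the relevant definitions (simplified mirrors of the kernel's, for illustration; with_cont is a hypothetical helper):

#include <stdio.h>

/* With STRICT_MM_TYPECHECKS, pgprot_t is a struct, not a plain integer. */
typedef unsigned long pteval_t;
typedef struct { pteval_t pgprot; } pgprot_t;

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })
#define PTE_CONT	((pteval_t)1 << 52)	/* contiguous-hint bit */

static pgprot_t with_cont(pgprot_t prot)
{
	/* "return prot | __pgprot(PTE_CONT);" fails to compile here:
	 * C has no operator| for structs, so unwrap the value first. */
	return __pgprot(pgprot_val(prot) | PTE_CONT);
}

int main(void)
{
	pgprot_t base = __pgprot(0);

	printf("PTE_CONT set: %d\n",
	       !!(pgprot_val(with_cont(base)) & PTE_CONT));
	return 0;
}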