From 917538e212a2c080af95ccb4376c5387fac08176 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 6 Feb 2018 15:36:44 -0800 Subject: kasan: clean up KASAN_SHADOW_SCALE_SHIFT usage Right now the fact that KASAN uses a single shadow byte for 8 bytes of memory is scattered all over the code. This change defines KASAN_SHADOW_SCALE_SHIFT early in asm include files and makes use of this constant where necessary. [akpm@linux-foundation.org: coding-style fixes] Link: http://lkml.kernel.org/r/34937ca3b90736eaad91b568edf5684091f662e3.1515775666.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Acked-by: Andrey Ryabinin Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/include/asm/kasan.h | 17 ++++++++++------- arch/arm64/include/asm/memory.h | 3 ++- arch/arm64/mm/kasan_init.c | 3 ++- arch/x86/include/asm/kasan.h | 12 ++++++++---- 4 files changed, 22 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h index e266f80e45b7..8758bb008436 100644 --- a/arch/arm64/include/asm/kasan.h +++ b/arch/arm64/include/asm/kasan.h @@ -12,7 +12,8 @@ /* * KASAN_SHADOW_START: beginning of the kernel virtual addresses. - * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses. + * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses, + * where N = (1 << KASAN_SHADOW_SCALE_SHIFT). */ #define KASAN_SHADOW_START (VA_START) #define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE) @@ -20,14 +21,16 @@ /* * This value is used to map an address to the corresponding shadow * address by the following formula: - * shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET; + * shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET * - * (1 << 61) shadow addresses - [KASAN_SHADOW_OFFSET,KASAN_SHADOW_END] - * cover all 64-bits of virtual addresses. So KASAN_SHADOW_OFFSET - * should satisfy the following equation: - * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - (1ULL << 61) + * (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) shadow addresses that lie in range + * [KASAN_SHADOW_OFFSET, KASAN_SHADOW_END) cover all 64-bits of virtual + * addresses. So KASAN_SHADOW_OFFSET should satisfy the following equation: + * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - + * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT)) */ -#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << (64 - 3))) +#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << \ + (64 - KASAN_SHADOW_SCALE_SHIFT))) void kasan_init(void); void kasan_copy_shadow(pgd_t *pgdir); diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index d4bae7d6e0d8..50fa96a49792 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -85,7 +85,8 @@ * stack size when KASAN is in use. 
*/ #ifdef CONFIG_KASAN -#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - 3)) +#define KASAN_SHADOW_SCALE_SHIFT 3 +#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) #define KASAN_THREAD_SHIFT 1 #else #define KASAN_SHADOW_SIZE (0) diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index acba49fb5aac..6e02e6fb4c7b 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -135,7 +135,8 @@ static void __init kasan_pgd_populate(unsigned long addr, unsigned long end, /* The early shadow maps everything to a single page of zeroes */ asmlinkage void __init kasan_early_init(void) { - BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61)); + BUILD_BUG_ON(KASAN_SHADOW_OFFSET != + KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT))); BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE)); BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE)); kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h index b577dd0916aa..13e70da38bed 100644 --- a/arch/x86/include/asm/kasan.h +++ b/arch/x86/include/asm/kasan.h @@ -4,6 +4,7 @@ #include #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) +#define KASAN_SHADOW_SCALE_SHIFT 3 /* * Compiler uses shadow offset assuming that addresses start @@ -12,12 +13,15 @@ * 'kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT */ #define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \ - ((-1UL << __VIRTUAL_MASK_SHIFT) >> 3)) + ((-1UL << __VIRTUAL_MASK_SHIFT) >> \ + KASAN_SHADOW_SCALE_SHIFT)) /* - * 47 bits for kernel address -> (47 - 3) bits for shadow - * 56 bits for kernel address -> (56 - 3) bits for shadow + * 47 bits for kernel address -> (47 - KASAN_SHADOW_SCALE_SHIFT) bits for shadow + * 56 bits for kernel address -> (56 - KASAN_SHADOW_SCALE_SHIFT) bits for shadow */ -#define KASAN_SHADOW_END (KASAN_SHADOW_START + (1ULL << (__VIRTUAL_MASK_SHIFT - 3))) +#define KASAN_SHADOW_END (KASAN_SHADOW_START + \ + (1ULL << (__VIRTUAL_MASK_SHIFT - \ + KASAN_SHADOW_SCALE_SHIFT))) #ifndef __ASSEMBLY__ -- cgit v1.2.3 From 2bc2f688fdf8808de4f36be563ccdb0bde7c0c54 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 6 Feb 2018 15:37:41 -0800 Subject: Makefile: move stack-protector availability out of Kconfig Various portions of the kernel, especially per-architecture pieces, need to know if the compiler is building with the stack protector. This was done in the arch/Kconfig with 'select', but this doesn't allow a way to do auto-detected compiler support. In preparation for creating an on-if-available default, move the logic for the definition of CONFIG_CC_STACKPROTECTOR into the Makefile. Link: http://lkml.kernel.org/r/1510076320-69931-3-git-send-email-keescook@chromium.org Signed-off-by: Kees Cook Tested-by: Laura Abbott Cc: Masahiro Yamada Cc: Arnd Bergmann Cc: Josh Triplett Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Makefile | 6 +++++- arch/Kconfig | 8 -------- arch/x86/Kconfig | 2 +- 3 files changed, 6 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/Makefile b/Makefile index 20b1aa928d78..f0f934097f64 100644 --- a/Makefile +++ b/Makefile @@ -693,7 +693,7 @@ else endif endif # Find arch-specific stack protector compiler sanity-checking script. 
-ifdef CONFIG_CC_STACKPROTECTOR +ifdef stackp-name stackp-path := $(srctree)/scripts/gcc-$(SRCARCH)_$(BITS)-has-stack-protector.sh stackp-check := $(wildcard $(stackp-path)) # If the wildcard test matches a test script, run it to check functionality. @@ -702,6 +702,10 @@ ifdef CONFIG_CC_STACKPROTECTOR stackp-broken := y endif endif + ifndef stackp-broken + # If the stack protector is functional, enable code that depends on it. + KBUILD_CPPFLAGS += -DCONFIG_CC_STACKPROTECTOR + endif endif KBUILD_CFLAGS += $(stackp-flag) diff --git a/arch/Kconfig b/arch/Kconfig index 467dfa35bf96..9330a9d79dc3 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -538,12 +538,6 @@ config HAVE_CC_STACKPROTECTOR - its compiler supports the -fstack-protector option - it has implemented a stack canary (e.g. __stack_chk_guard) -config CC_STACKPROTECTOR - def_bool n - help - Set when a stack-protector mode is enabled, so that the build - can enable kernel-side support for the GCC feature. - choice prompt "Stack Protector buffer overflow detection" depends on HAVE_CC_STACKPROTECTOR @@ -564,7 +558,6 @@ config CC_STACKPROTECTOR_NONE config CC_STACKPROTECTOR_REGULAR bool "Regular" - select CC_STACKPROTECTOR help Functions will have the stack-protector canary logic added if they have an 8-byte or larger character array on the stack. @@ -578,7 +571,6 @@ config CC_STACKPROTECTOR_REGULAR config CC_STACKPROTECTOR_STRONG bool "Strong" - select CC_STACKPROTECTOR help Functions will have the stack-protector canary logic added in any of the following conditions: diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index b0771ceabb4b..d9878f1f3bac 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -322,7 +322,7 @@ config X86_64_SMP config X86_32_LAZY_GS def_bool y - depends on X86_32 && !CC_STACKPROTECTOR + depends on X86_32 && CC_STACKPROTECTOR_NONE config ARCH_SUPPORTS_UPROBES def_bool y -- cgit v1.2.3 From 44c6dc940b190cf22b044a784f3e00a7e7f08b2f Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 6 Feb 2018 15:37:45 -0800 Subject: Makefile: introduce CONFIG_CC_STACKPROTECTOR_AUTO Nearly all modern compilers support a stack-protector option, and nearly all modern distributions enable the kernel stack-protector, so enabling this by default in kernel builds would make sense. However, Kconfig does not have knowledge of available compiler features, so it isn't safe to force on, as this would unconditionally break builds for the compilers or architectures that don't have support. Instead, this introduces a new option, CONFIG_CC_STACKPROTECTOR_AUTO, which attempts to discover the best possible stack-protector available, and will allow builds to proceed even if the compiler doesn't support any stack-protector. This option is made the default so that kernels built with modern compilers will be protected-by-default against stack buffer overflows, avoiding things like the recent BlueBorne attack. Selection of a specific stack-protector option remains available, including disabling it. Additionally, tiny.config is adjusted to use CC_STACKPROTECTOR_NONE, since that's the option with the least code size (and it used to be the default, so we have to explicitly choose it there now). 
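For context, kernel code that depends on the canary keys off CONFIG_CC_STACKPROTECTOR, which the previous patch turned into a Makefile-provided define (-DCONFIG_CC_STACKPROTECTOR is added to KBUILD_CPPFLAGS only when a working flag is found). A minimal sketch of such a consumer, assuming an AUTO build; the helper name here is made up for illustration and is not part of this series:

```
#include <linux/random.h>
#include <linux/sched.h>

#ifdef CONFIG_CC_STACKPROTECTOR
static inline void init_task_canary(struct task_struct *tsk)
{
	/* Seed the value the compiler-inserted prologue/epilogue checks. */
	tsk->stack_canary = get_random_long();
}
#else
/* No usable stack protector detected: AUTO quietly compiles this out. */
static inline void init_task_canary(struct task_struct *tsk) { }
#endif
```

With CC_STACKPROTECTOR_AUTO the same source builds either way: the define is only injected when the compiler actually supports a -fstack-protector* option.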
Link: http://lkml.kernel.org/r/1510076320-69931-4-git-send-email-keescook@chromium.org Signed-off-by: Kees Cook Tested-by: Laura Abbott Cc: Masahiro Yamada Cc: Arnd Bergmann Cc: Josh Triplett Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Makefile | 37 ++++++++++++++++++++++++++++++++++--- arch/Kconfig | 8 +++++++- kernel/configs/tiny.config | 4 ++++ 3 files changed, 45 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/Makefile b/Makefile index f0f934097f64..d192dd826cce 100644 --- a/Makefile +++ b/Makefile @@ -680,6 +680,10 @@ endif # This selects the stack protector compiler flag. Testing it is delayed # until after .config has been reprocessed, in the prepare-compiler-check # target. +ifdef CONFIG_CC_STACKPROTECTOR_AUTO + stackp-flag := $(call cc-option,-fstack-protector-strong,$(call cc-option,-fstack-protector)) + stackp-name := AUTO +else ifdef CONFIG_CC_STACKPROTECTOR_REGULAR stackp-flag := -fstack-protector stackp-name := REGULAR @@ -688,12 +692,18 @@ ifdef CONFIG_CC_STACKPROTECTOR_STRONG stackp-flag := -fstack-protector-strong stackp-name := STRONG else + # If either there is no stack protector for this architecture or + # CONFIG_CC_STACKPROTECTOR_NONE is selected, we're done, and $(stackp-name) + # is empty, skipping all remaining stack protector tests. + # # Force off for distro compilers that enable stack protector by default. - stackp-flag := $(call cc-option, -fno-stack-protector) + KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector) +endif endif endif # Find arch-specific stack protector compiler sanity-checking script. ifdef stackp-name +ifneq ($(stackp-flag),) stackp-path := $(srctree)/scripts/gcc-$(SRCARCH)_$(BITS)-has-stack-protector.sh stackp-check := $(wildcard $(stackp-path)) # If the wildcard test matches a test script, run it to check functionality. @@ -705,9 +715,17 @@ ifdef stackp-name ifndef stackp-broken # If the stack protector is functional, enable code that depends on it. KBUILD_CPPFLAGS += -DCONFIG_CC_STACKPROTECTOR + # Either we've already detected the flag (for AUTO) or we'll fail the + # build in the prepare-compiler-check rule (for specific flag). + KBUILD_CFLAGS += $(stackp-flag) + else + # We have to make sure stack protector is unconditionally disabled if + # the compiler is broken (in case we're going to continue the build in + # AUTO mode). + KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector) endif endif -KBUILD_CFLAGS += $(stackp-flag) +endif ifeq ($(cc-name),clang) KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,) @@ -1102,15 +1120,28 @@ PHONY += prepare-compiler-check prepare-compiler-check: FORCE # Make sure compiler supports requested stack protector flag. ifdef stackp-name + # Warn about CONFIG_CC_STACKPROTECTOR_AUTO having found no option. + ifeq ($(stackp-flag),) + @echo CONFIG_CC_STACKPROTECTOR_$(stackp-name): \ + Compiler does not support any known stack-protector >&2 + else + # Fail if specifically requested stack protector is missing. ifeq ($(call cc-option, $(stackp-flag)),) @echo Cannot use CONFIG_CC_STACKPROTECTOR_$(stackp-name): \ $(stackp-flag) not supported by compiler >&2 && exit 1 endif + endif endif -# Make sure compiler does not have buggy stack-protector support. +# Make sure compiler does not have buggy stack-protector support. If a +# specific stack-protector was requested, fail the build, otherwise warn. 
ifdef stackp-broken + ifeq ($(stackp-name),AUTO) + @echo CONFIG_CC_STACKPROTECTOR_$(stackp-name): \ + $(stackp-flag) available but compiler is broken: disabling >&2 + else @echo Cannot use CONFIG_CC_STACKPROTECTOR_$(stackp-name): \ $(stackp-flag) available but compiler is broken >&2 && exit 1 + endif endif @: diff --git a/arch/Kconfig b/arch/Kconfig index 9330a9d79dc3..76c0b54443b1 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -541,7 +541,7 @@ config HAVE_CC_STACKPROTECTOR choice prompt "Stack Protector buffer overflow detection" depends on HAVE_CC_STACKPROTECTOR - default CC_STACKPROTECTOR_NONE + default CC_STACKPROTECTOR_AUTO help This option turns on the "stack-protector" GCC feature. This feature puts, at the beginning of functions, a canary value on @@ -588,6 +588,12 @@ config CC_STACKPROTECTOR_STRONG about 20% of all kernel functions, which increases the kernel code size by about 2%. +config CC_STACKPROTECTOR_AUTO + bool "Automatic" + help + If the compiler supports it, the best available stack-protector + option will be chosen. + endchoice config THIN_ARCHIVES diff --git a/kernel/configs/tiny.config b/kernel/configs/tiny.config index 7fa0c4ae6394..9bfdffc100da 100644 --- a/kernel/configs/tiny.config +++ b/kernel/configs/tiny.config @@ -10,3 +10,7 @@ CONFIG_OPTIMIZE_INLINING=y # CONFIG_SLAB is not set # CONFIG_SLUB is not set CONFIG_SLOB=y +CONFIG_CC_STACKPROTECTOR_NONE=y +# CONFIG_CC_STACKPROTECTOR_REGULAR is not set +# CONFIG_CC_STACKPROTECTOR_STRONG is not set +# CONFIG_CC_STACKPROTECTOR_AUTO is not set -- cgit v1.2.3 From 3aa56885e51683a19c8aa71739fd279b3f501cd7 Mon Sep 17 00:00:00 2001 From: Yury Norov Date: Tue, 6 Feb 2018 15:38:06 -0800 Subject: bitmap: replace bitmap_{from,to}_u32array with bitmap_{from,to}_arr32 over the kernel. Additionally to it: * __check_eq_bitmap() now takes single nbits argument. * __check_eq_u32_array is not used in new test but may be used in future. So I don't remove it here, but annotate as __used. Tested on arm64 and 32-bit BE mips. [arnd@arndb.de: perf: arm_dsu_pmu: convert to bitmap_from_arr32] Link: http://lkml.kernel.org/r/20180201172508.5739-2-ynorov@caviumnetworks.com [ynorov@caviumnetworks.com: fix net/core/ethtool.c] Link: http://lkml.kernel.org/r/20180205071747.4ekxtsbgxkj5b2fz@yury-thinkpad Link: http://lkml.kernel.org/r/20171228150019.27953-2-ynorov@caviumnetworks.com Signed-off-by: Yury Norov Signed-off-by: Arnd Bergmann Cc: Ben Hutchings Cc: David Decotigny , Cc: David S. 
Miller , Cc: Geert Uytterhoeven Cc: Matthew Wilcox Cc: Rasmus Villemoes Cc: Heiner Kallweit Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/kernel/perf_event.c | 5 +- drivers/perf/arm_dsu_pmu.c | 6 +- include/linux/bitmap.h | 11 +-- lib/bitmap.c | 87 ----------------- lib/test_bitmap.c | 206 +++++++---------------------------------- net/core/ethtool.c | 53 +++++------ 6 files changed, 57 insertions(+), 311 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 3affca3dd96a..75b220ba73a3 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -925,9 +925,8 @@ static void __armv8pmu_probe_pmu(void *info) pmceid[0] = read_sysreg(pmceid0_el0); pmceid[1] = read_sysreg(pmceid1_el0); - bitmap_from_u32array(cpu_pmu->pmceid_bitmap, - ARMV8_PMUV3_MAX_COMMON_EVENTS, pmceid, - ARRAY_SIZE(pmceid)); + bitmap_from_arr32(cpu_pmu->pmceid_bitmap, + pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); } static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu) diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c index 93c50e377507..38f2cc2a6c74 100644 --- a/drivers/perf/arm_dsu_pmu.c +++ b/drivers/perf/arm_dsu_pmu.c @@ -658,10 +658,8 @@ static void dsu_pmu_probe_pmu(struct dsu_pmu *dsu_pmu) return; cpmceid[0] = __dsu_pmu_read_pmceid(0); cpmceid[1] = __dsu_pmu_read_pmceid(1); - bitmap_from_u32array(dsu_pmu->cpmceid_bitmap, - DSU_PMU_MAX_COMMON_EVENTS, - cpmceid, - ARRAY_SIZE(cpmceid)); + bitmap_from_arr32(dsu_pmu->cpmceid_bitmap, cpmceid, + DSU_PMU_MAX_COMMON_EVENTS); } static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu) diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index dac9dff90350..e43533ec7660 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -64,8 +64,6 @@ * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region * bitmap_release_region(bitmap, pos, order) Free specified bit region * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region - * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words) - * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words) * bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst * bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst * @@ -176,14 +174,7 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); -extern unsigned int bitmap_from_u32array(unsigned long *bitmap, - unsigned int nbits, - const u32 *buf, - unsigned int nwords); -extern unsigned int bitmap_to_u32array(u32 *buf, - unsigned int nwords, - const unsigned long *bitmap, - unsigned int nbits); + #ifdef __BIG_ENDIAN extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); #else diff --git a/lib/bitmap.c b/lib/bitmap.c index 47fe6441562c..9e498c77ed0e 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -1105,93 +1105,6 @@ int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order) } EXPORT_SYMBOL(bitmap_allocate_region); -/** - * bitmap_from_u32array - copy the contents of a u32 array of bits to bitmap - * @bitmap: array of unsigned longs, the destination bitmap, non NULL - * @nbits: number of bits in @bitmap - * @buf: 
array of u32 (in host byte order), the source bitmap, non NULL - * @nwords: number of u32 words in @buf - * - * copy min(nbits, 32*nwords) bits from @buf to @bitmap, remaining - * bits between nword and nbits in @bitmap (if any) are cleared. In - * last word of @bitmap, the bits beyond nbits (if any) are kept - * unchanged. - * - * Return the number of bits effectively copied. - */ -unsigned int -bitmap_from_u32array(unsigned long *bitmap, unsigned int nbits, - const u32 *buf, unsigned int nwords) -{ - unsigned int dst_idx, src_idx; - - for (src_idx = dst_idx = 0; dst_idx < BITS_TO_LONGS(nbits); ++dst_idx) { - unsigned long part = 0; - - if (src_idx < nwords) - part = buf[src_idx++]; - -#if BITS_PER_LONG == 64 - if (src_idx < nwords) - part |= ((unsigned long) buf[src_idx++]) << 32; -#endif - - if (dst_idx < nbits/BITS_PER_LONG) - bitmap[dst_idx] = part; - else { - unsigned long mask = BITMAP_LAST_WORD_MASK(nbits); - - bitmap[dst_idx] = (bitmap[dst_idx] & ~mask) - | (part & mask); - } - } - - return min_t(unsigned int, nbits, 32*nwords); -} -EXPORT_SYMBOL(bitmap_from_u32array); - -/** - * bitmap_to_u32array - copy the contents of bitmap to a u32 array of bits - * @buf: array of u32 (in host byte order), the dest bitmap, non NULL - * @nwords: number of u32 words in @buf - * @bitmap: array of unsigned longs, the source bitmap, non NULL - * @nbits: number of bits in @bitmap - * - * copy min(nbits, 32*nwords) bits from @bitmap to @buf. Remaining - * bits after nbits in @buf (if any) are cleared. - * - * Return the number of bits effectively copied. - */ -unsigned int -bitmap_to_u32array(u32 *buf, unsigned int nwords, - const unsigned long *bitmap, unsigned int nbits) -{ - unsigned int dst_idx = 0, src_idx = 0; - - while (dst_idx < nwords) { - unsigned long part = 0; - - if (src_idx < BITS_TO_LONGS(nbits)) { - part = bitmap[src_idx]; - if (src_idx >= nbits/BITS_PER_LONG) - part &= BITMAP_LAST_WORD_MASK(nbits); - src_idx++; - } - - buf[dst_idx++] = part & 0xffffffffUL; - -#if BITS_PER_LONG == 64 - if (dst_idx < nwords) { - part >>= 32; - buf[dst_idx++] = part & 0xffffffffUL; - } -#endif - } - - return min_t(unsigned int, nbits, 32*nwords); -} -EXPORT_SYMBOL(bitmap_to_u32array); - /** * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order. 
* @dst: destination buffer diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index aa1f2669bdd5..de7ef2996a07 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c @@ -23,7 +23,7 @@ __check_eq_uint(const char *srcfile, unsigned int line, const unsigned int exp_uint, unsigned int x) { if (exp_uint != x) { - pr_warn("[%s:%u] expected %u, got %u\n", + pr_err("[%s:%u] expected %u, got %u\n", srcfile, line, exp_uint, x); return false; } @@ -33,19 +33,13 @@ __check_eq_uint(const char *srcfile, unsigned int line, static bool __init __check_eq_bitmap(const char *srcfile, unsigned int line, - const unsigned long *exp_bmap, unsigned int exp_nbits, - const unsigned long *bmap, unsigned int nbits) + const unsigned long *exp_bmap, const unsigned long *bmap, + unsigned int nbits) { - if (exp_nbits != nbits) { - pr_warn("[%s:%u] bitmap length mismatch: expected %u, got %u\n", - srcfile, line, exp_nbits, nbits); - return false; - } - if (!bitmap_equal(exp_bmap, bmap, nbits)) { pr_warn("[%s:%u] bitmaps contents differ: expected \"%*pbl\", got \"%*pbl\"\n", srcfile, line, - exp_nbits, exp_bmap, nbits, bmap); + nbits, exp_bmap, nbits, bmap); return false; } return true; @@ -66,6 +60,10 @@ __check_eq_pbl(const char *srcfile, unsigned int line, return true; } +static bool __init +__check_eq_u32_array(const char *srcfile, unsigned int line, + const u32 *exp_arr, unsigned int exp_len, + const u32 *arr, unsigned int len) __used; static bool __init __check_eq_u32_array(const char *srcfile, unsigned int line, const u32 *exp_arr, unsigned int exp_len, @@ -255,171 +253,29 @@ static void __init test_bitmap_parselist(void) } } -static void __init test_bitmap_u32_array_conversions(void) +static void __init test_bitmap_arr32(void) { - DECLARE_BITMAP(bmap1, 1024); - DECLARE_BITMAP(bmap2, 1024); - u32 exp_arr[32], arr[32]; - unsigned nbits; - - for (nbits = 0 ; nbits < 257 ; ++nbits) { - const unsigned int used_u32s = DIV_ROUND_UP(nbits, 32); - unsigned int i, rv; - - bitmap_zero(bmap1, nbits); - bitmap_set(bmap1, nbits, 1024 - nbits); /* garbage */ - - memset(arr, 0xff, sizeof(arr)); - rv = bitmap_to_u32array(arr, used_u32s, bmap1, nbits); - expect_eq_uint(nbits, rv); - - memset(exp_arr, 0xff, sizeof(exp_arr)); - memset(exp_arr, 0, used_u32s*sizeof(*exp_arr)); - expect_eq_u32_array(exp_arr, 32, arr, 32); - - bitmap_fill(bmap2, 1024); - rv = bitmap_from_u32array(bmap2, nbits, arr, used_u32s); - expect_eq_uint(nbits, rv); - expect_eq_bitmap(bmap1, 1024, bmap2, 1024); - - for (i = 0 ; i < nbits ; ++i) { - /* - * test conversion bitmap -> u32[] - */ - - bitmap_zero(bmap1, 1024); - __set_bit(i, bmap1); - bitmap_set(bmap1, nbits, 1024 - nbits); /* garbage */ - - memset(arr, 0xff, sizeof(arr)); - rv = bitmap_to_u32array(arr, used_u32s, bmap1, nbits); - expect_eq_uint(nbits, rv); - - /* 1st used u32 words contain expected bit set, the - * remaining words are left unchanged (0xff) - */ - memset(exp_arr, 0xff, sizeof(exp_arr)); - memset(exp_arr, 0, used_u32s*sizeof(*exp_arr)); - exp_arr[i/32] = (1U<<(i%32)); - expect_eq_u32_array(exp_arr, 32, arr, 32); - - - /* same, with longer array to fill - */ - memset(arr, 0xff, sizeof(arr)); - rv = bitmap_to_u32array(arr, 32, bmap1, nbits); - expect_eq_uint(nbits, rv); - - /* 1st used u32 words contain expected bit set, the - * remaining words are all 0s - */ - memset(exp_arr, 0, sizeof(exp_arr)); - exp_arr[i/32] = (1U<<(i%32)); - expect_eq_u32_array(exp_arr, 32, arr, 32); - - /* - * test conversion u32[] -> bitmap - */ - - /* the 1st nbits of bmap2 are identical to - * bmap1, the remaining 
bits of bmap2 are left - * unchanged (all 1s) - */ - bitmap_fill(bmap2, 1024); - rv = bitmap_from_u32array(bmap2, nbits, - exp_arr, used_u32s); - expect_eq_uint(nbits, rv); - - expect_eq_bitmap(bmap1, 1024, bmap2, 1024); - - /* same, with more bits to fill - */ - memset(arr, 0xff, sizeof(arr)); /* garbage */ - memset(arr, 0, used_u32s*sizeof(u32)); - arr[i/32] = (1U<<(i%32)); - - bitmap_fill(bmap2, 1024); - rv = bitmap_from_u32array(bmap2, 1024, arr, used_u32s); - expect_eq_uint(used_u32s*32, rv); - - /* the 1st nbits of bmap2 are identical to - * bmap1, the remaining bits of bmap2 are cleared - */ - bitmap_zero(bmap1, 1024); - __set_bit(i, bmap1); - expect_eq_bitmap(bmap1, 1024, bmap2, 1024); - - - /* - * test short conversion bitmap -> u32[] (1 - * word too short) - */ - if (used_u32s > 1) { - bitmap_zero(bmap1, 1024); - __set_bit(i, bmap1); - bitmap_set(bmap1, nbits, - 1024 - nbits); /* garbage */ - memset(arr, 0xff, sizeof(arr)); - - rv = bitmap_to_u32array(arr, used_u32s - 1, - bmap1, nbits); - expect_eq_uint((used_u32s - 1)*32, rv); - - /* 1st used u32 words contain expected - * bit set, the remaining words are - * left unchanged (0xff) - */ - memset(exp_arr, 0xff, sizeof(exp_arr)); - memset(exp_arr, 0, - (used_u32s-1)*sizeof(*exp_arr)); - if ((i/32) < (used_u32s - 1)) - exp_arr[i/32] = (1U<<(i%32)); - expect_eq_u32_array(exp_arr, 32, arr, 32); - } - - /* - * test short conversion u32[] -> bitmap (3 - * bits too short) - */ - if (nbits > 3) { - memset(arr, 0xff, sizeof(arr)); /* garbage */ - memset(arr, 0, used_u32s*sizeof(*arr)); - arr[i/32] = (1U<<(i%32)); - - bitmap_zero(bmap1, 1024); - rv = bitmap_from_u32array(bmap1, nbits - 3, - arr, used_u32s); - expect_eq_uint(nbits - 3, rv); - - /* we are expecting the bit < nbits - - * 3 (none otherwise), and the rest of - * bmap1 unchanged (0-filled) - */ - bitmap_zero(bmap2, 1024); - if (i < nbits - 3) - __set_bit(i, bmap2); - expect_eq_bitmap(bmap2, 1024, bmap1, 1024); - - /* do the same with bmap1 initially - * 1-filled - */ - - bitmap_fill(bmap1, 1024); - rv = bitmap_from_u32array(bmap1, nbits - 3, - arr, used_u32s); - expect_eq_uint(nbits - 3, rv); - - /* we are expecting the bit < nbits - - * 3 (none otherwise), and the rest of - * bmap1 unchanged (1-filled) - */ - bitmap_zero(bmap2, 1024); - if (i < nbits - 3) - __set_bit(i, bmap2); - bitmap_set(bmap2, nbits-3, 1024 - nbits + 3); - expect_eq_bitmap(bmap2, 1024, bmap1, 1024); - } - } + unsigned int nbits, next_bit, len = sizeof(exp) * 8; + u32 arr[sizeof(exp) / 4]; + DECLARE_BITMAP(bmap2, len); + + memset(arr, 0xa5, sizeof(arr)); + + for (nbits = 0; nbits < len; ++nbits) { + bitmap_to_arr32(arr, exp, nbits); + bitmap_from_arr32(bmap2, arr, nbits); + expect_eq_bitmap(bmap2, exp, nbits); + + next_bit = find_next_bit(bmap2, + round_up(nbits, BITS_PER_LONG), nbits); + if (next_bit < round_up(nbits, BITS_PER_LONG)) + pr_err("bitmap_copy_arr32(nbits == %d:" + " tail is not safely cleared: %d\n", + nbits, next_bit); + + if (nbits < len - 32) + expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)], + 0xa5a5a5a5); } } @@ -454,7 +310,7 @@ static void noinline __init test_mem_optimisations(void) static int __init test_bitmap_init(void) { test_zero_fill_copy(); - test_bitmap_u32_array_conversions(); + test_bitmap_arr32(); test_bitmap_parselist(); test_mem_optimisations(); diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 107b122c8969..494e6a5d7306 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -616,18 +616,15 @@ static int load_link_ksettings_from_user(struct ethtool_link_ksettings 
*to, return -EFAULT; memcpy(&to->base, &link_usettings.base, sizeof(to->base)); - bitmap_from_u32array(to->link_modes.supported, - __ETHTOOL_LINK_MODE_MASK_NBITS, - link_usettings.link_modes.supported, - __ETHTOOL_LINK_MODE_MASK_NU32); - bitmap_from_u32array(to->link_modes.advertising, - __ETHTOOL_LINK_MODE_MASK_NBITS, - link_usettings.link_modes.advertising, - __ETHTOOL_LINK_MODE_MASK_NU32); - bitmap_from_u32array(to->link_modes.lp_advertising, - __ETHTOOL_LINK_MODE_MASK_NBITS, - link_usettings.link_modes.lp_advertising, - __ETHTOOL_LINK_MODE_MASK_NU32); + bitmap_from_arr32(to->link_modes.supported, + link_usettings.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_from_arr32(to->link_modes.advertising, + link_usettings.link_modes.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_from_arr32(to->link_modes.lp_advertising, + link_usettings.link_modes.lp_advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); return 0; } @@ -643,18 +640,15 @@ store_link_ksettings_for_user(void __user *to, struct ethtool_link_usettings link_usettings; memcpy(&link_usettings.base, &from->base, sizeof(link_usettings)); - bitmap_to_u32array(link_usettings.link_modes.supported, - __ETHTOOL_LINK_MODE_MASK_NU32, - from->link_modes.supported, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_to_u32array(link_usettings.link_modes.advertising, - __ETHTOOL_LINK_MODE_MASK_NU32, - from->link_modes.advertising, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_to_u32array(link_usettings.link_modes.lp_advertising, - __ETHTOOL_LINK_MODE_MASK_NU32, - from->link_modes.lp_advertising, - __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_to_arr32(link_usettings.link_modes.supported, + from->link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_to_arr32(link_usettings.link_modes.advertising, + from->link_modes.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_to_arr32(link_usettings.link_modes.lp_advertising, + from->link_modes.lp_advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); if (copy_to_user(to, &link_usettings, sizeof(link_usettings))) return -EFAULT; @@ -2358,10 +2352,8 @@ static int ethtool_get_per_queue_coalesce(struct net_device *dev, useraddr += sizeof(*per_queue_opt); - bitmap_from_u32array(queue_mask, - MAX_NUM_QUEUE, - per_queue_opt->queue_mask, - DIV_ROUND_UP(MAX_NUM_QUEUE, 32)); + bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, + MAX_NUM_QUEUE); for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) { struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; @@ -2393,10 +2385,7 @@ static int ethtool_set_per_queue_coalesce(struct net_device *dev, useraddr += sizeof(*per_queue_opt); - bitmap_from_u32array(queue_mask, - MAX_NUM_QUEUE, - per_queue_opt->queue_mask, - DIV_ROUND_UP(MAX_NUM_QUEUE, 32)); + bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, MAX_NUM_QUEUE); n_queue = bitmap_weight(queue_mask, MAX_NUM_QUEUE); tmp = backup = kmalloc_array(n_queue, sizeof(*backup), GFP_KERNEL); if (!backup) -- cgit v1.2.3 From 0ade34c37012ea5c516d9aa4d19a56e9f40a55ed Mon Sep 17 00:00:00 2001 From: Clement Courbet Date: Tue, 6 Feb 2018 15:38:34 -0800 Subject: lib: optimize cpumask_next_and() We've measured that we spend ~0.6% of sys cpu time in cpumask_next_and(). It's essentially a joined iteration in search for a non-zero bit, which is currently implemented as a lookup join (find a nonzero bit on the lhs, lookup the rhs to see if it's set there). Implement a direct join (find a nonzero bit on the incrementally built join). 
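A simplified sketch of the two strategies follows; this is not the code added by the patch (the real helper, _find_next_bit() in lib/find_bit.c below, additionally shares the "invert" handling with find_next_zero_bit and the little-endian variants):

```
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/kernel.h>

/* Lookup join: walk the set bits of @p1, probe @p2 for each candidate. */
static unsigned long lookup_join(const unsigned long *p1,
				 const unsigned long *p2,
				 unsigned long nbits, unsigned long start)
{
	unsigned long n;

	for (n = find_next_bit(p1, nbits, start); n < nbits;
	     n = find_next_bit(p1, nbits, n + 1))
		if (test_bit(n, p2))
			break;
	return n;
}

/* Direct join: AND the words as we go and search the joined word. */
static unsigned long direct_join(const unsigned long *p1,
				 const unsigned long *p2,
				 unsigned long nbits, unsigned long start)
{
	unsigned long i, word;

	if (start >= nbits)
		return nbits;

	for (i = start / BITS_PER_LONG; i < BITS_TO_LONGS(nbits); i++) {
		word = p1[i] & p2[i];
		if (i == start / BITS_PER_LONG)
			word &= BITMAP_FIRST_WORD_MASK(start);
		if (word)
			return min(i * BITS_PER_LONG + __ffs(word), nbits);
	}
	return nbits;
}
```

The direct join touches each word of both bitmaps at most once, instead of re-probing the second bitmap for every set bit of the first.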
Also add generic bitmap benchmarks in the new `test_find_bit` module for new function (see `find_next_and_bit` in [2] and [3] below). For cpumask_next_and, direct benchmarking shows that it's 1.17x to 14x faster with a geometric mean of 2.1 on 32 CPUs [1]. No impact on memory usage. Note that on Arm, the new pure-C implementation still outperforms the old one that uses a mix of C and asm (`find_next_bit`) [3]. [1] Approximate benchmark code: ``` unsigned long src1p[nr_cpumask_longs] = {pattern1}; unsigned long src2p[nr_cpumask_longs] = {pattern2}; for (/*a bunch of repetitions*/) { for (int n = -1; n <= nr_cpu_ids; ++n) { asm volatile("" : "+rm"(src1p)); // prevent any optimization asm volatile("" : "+rm"(src2p)); unsigned long result = cpumask_next_and(n, src1p, src2p); asm volatile("" : "+rm"(result)); } } ``` Results: pattern1 pattern2 time_before/time_after 0x0000ffff 0x0000ffff 1.65 0x0000ffff 0x00005555 2.24 0x0000ffff 0x00001111 2.94 0x0000ffff 0x00000000 14.0 0x00005555 0x0000ffff 1.67 0x00005555 0x00005555 1.71 0x00005555 0x00001111 1.90 0x00005555 0x00000000 6.58 0x00001111 0x0000ffff 1.46 0x00001111 0x00005555 1.49 0x00001111 0x00001111 1.45 0x00001111 0x00000000 3.10 0x00000000 0x0000ffff 1.18 0x00000000 0x00005555 1.18 0x00000000 0x00001111 1.17 0x00000000 0x00000000 1.25 ----------------------------- geo.mean 2.06 [2] test_find_next_bit, X86 (skylake) [ 3913.477422] Start testing find_bit() with random-filled bitmap [ 3913.477847] find_next_bit: 160868 cycles, 16484 iterations [ 3913.477933] find_next_zero_bit: 169542 cycles, 16285 iterations [ 3913.478036] find_last_bit: 201638 cycles, 16483 iterations [ 3913.480214] find_first_bit: 4353244 cycles, 16484 iterations [ 3913.480216] Start testing find_next_and_bit() with random-filled bitmap [ 3913.481074] find_next_and_bit: 89604 cycles, 8216 iterations [ 3913.481075] Start testing find_bit() with sparse bitmap [ 3913.481078] find_next_bit: 2536 cycles, 66 iterations [ 3913.481252] find_next_zero_bit: 344404 cycles, 32703 iterations [ 3913.481255] find_last_bit: 2006 cycles, 66 iterations [ 3913.481265] find_first_bit: 17488 cycles, 66 iterations [ 3913.481266] Start testing find_next_and_bit() with sparse bitmap [ 3913.481272] find_next_and_bit: 764 cycles, 1 iterations [3] test_find_next_bit, arm (v7 odroid XU3). 
[ 267.206928] Start testing find_bit() with random-filled bitmap [ 267.214752] find_next_bit: 4474 cycles, 16419 iterations [ 267.221850] find_next_zero_bit: 5976 cycles, 16350 iterations [ 267.229294] find_last_bit: 4209 cycles, 16419 iterations [ 267.279131] find_first_bit: 1032991 cycles, 16420 iterations [ 267.286265] Start testing find_next_and_bit() with random-filled bitmap [ 267.302386] find_next_and_bit: 2290 cycles, 8140 iterations [ 267.309422] Start testing find_bit() with sparse bitmap [ 267.316054] find_next_bit: 191 cycles, 66 iterations [ 267.322726] find_next_zero_bit: 8758 cycles, 32703 iterations [ 267.329803] find_last_bit: 84 cycles, 66 iterations [ 267.336169] find_first_bit: 4118 cycles, 66 iterations [ 267.342627] Start testing find_next_and_bit() with sparse bitmap [ 267.356919] find_next_and_bit: 91 cycles, 1 iterations [courbet@google.com: v6] Link: http://lkml.kernel.org/r/20171129095715.23430-1-courbet@google.com [geert@linux-m68k.org: m68k/bitops: always include ] Link: http://lkml.kernel.org/r/1512556816-28627-1-git-send-email-geert@linux-m68k.org Link: http://lkml.kernel.org/r/20171128131334.23491-1-courbet@google.com Signed-off-by: Clement Courbet Signed-off-by: Geert Uytterhoeven Cc: Yury Norov Cc: Geert Uytterhoeven Cc: Alexey Dobriyan Cc: Rasmus Villemoes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/include/asm/bitops.h | 1 + arch/m68k/include/asm/bitops.h | 3 +- arch/unicore32/include/asm/bitops.h | 2 ++ include/asm-generic/bitops/find.h | 20 +++++++++++ include/linux/bitmap.h | 6 +++- lib/cpumask.c | 9 ++--- lib/find_bit.c | 59 ++++++++++++++++++++++++--------- lib/find_bit_benchmark.c | 25 +++++++++++++- tools/include/asm-generic/bitops/find.h | 16 +++++++++ tools/lib/find_bit.c | 39 ++++++++++++++++------ 10 files changed, 147 insertions(+), 33 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index ce5ee762ed66..4cab9bb823fb 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -338,6 +338,7 @@ static inline int find_next_bit_le(const void *p, int size, int offset) #endif +#include #include /* diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h index dda58cfe8c22..93b47b1f6fb4 100644 --- a/arch/m68k/include/asm/bitops.h +++ b/arch/m68k/include/asm/bitops.h @@ -311,7 +311,6 @@ static inline int bfchg_mem_test_and_change_bit(int nr, * functions. 
*/ #if defined(CONFIG_CPU_HAS_NO_BITFIELDS) -#include #include #else @@ -441,6 +440,8 @@ static inline unsigned long ffz(unsigned long word) #endif +#include + #ifdef __KERNEL__ #if defined(CONFIG_CPU_HAS_NO_BITFIELDS) diff --git a/arch/unicore32/include/asm/bitops.h b/arch/unicore32/include/asm/bitops.h index 401f597bc38c..c0cbdbe17168 100644 --- a/arch/unicore32/include/asm/bitops.h +++ b/arch/unicore32/include/asm/bitops.h @@ -44,4 +44,6 @@ static inline int fls(int x) #define find_first_bit find_first_bit #define find_first_zero_bit find_first_zero_bit +#include + #endif /* __UNICORE_BITOPS_H__ */ diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h index 1ba611e16fa0..8a1ee10014de 100644 --- a/include/asm-generic/bitops/find.h +++ b/include/asm-generic/bitops/find.h @@ -16,6 +16,22 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset); #endif +#ifndef find_next_and_bit +/** + * find_next_and_bit - find the next set bit in both memory regions + * @addr1: The first address to base the search on + * @addr2: The second address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + * + * Returns the bit number for the next set bit + * If no bits are set, returns @size. + */ +extern unsigned long find_next_and_bit(const unsigned long *addr1, + const unsigned long *addr2, unsigned long size, + unsigned long offset); +#endif + #ifndef find_next_zero_bit /** * find_next_zero_bit - find the next cleared bit in a memory region @@ -55,8 +71,12 @@ extern unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size); #else /* CONFIG_GENERIC_FIND_FIRST_BIT */ +#ifndef find_first_bit #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) +#endif +#ifndef find_first_zero_bit #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) +#endif #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index d9bf699e0e7a..5f11fbdc27f8 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -88,8 +88,12 @@ * test_and_change_bit(bit, addr) Change bit and return old value * find_first_zero_bit(addr, nbits) Position first zero bit in *addr * find_first_bit(addr, nbits) Position first set bit in *addr - * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit + * find_next_zero_bit(addr, nbits, bit) + * Position next zero bit in *addr >= bit * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit + * find_next_and_bit(addr1, addr2, nbits, bit) + * Same as find_next_bit, but in + * (*addr1 & *addr2) * */ diff --git a/lib/cpumask.c b/lib/cpumask.c index 35fe142ebb5e..beca6244671a 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -33,10 +33,11 @@ EXPORT_SYMBOL(cpumask_next); int cpumask_next_and(int n, const struct cpumask *src1p, const struct cpumask *src2p) { - while ((n = cpumask_next(n, src1p)) < nr_cpu_ids) - if (cpumask_test_cpu(n, src2p)) - break; - return n; + /* -1 is a legal arg here. 
*/ + if (n != -1) + cpumask_check(n); + return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p), + nr_cpumask_bits, n + 1); } EXPORT_SYMBOL(cpumask_next_and); diff --git a/lib/find_bit.c b/lib/find_bit.c index 6ed74f78380c..ee3df93ba69a 100644 --- a/lib/find_bit.c +++ b/lib/find_bit.c @@ -21,22 +21,29 @@ #include #include -#if !defined(find_next_bit) || !defined(find_next_zero_bit) +#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \ + !defined(find_next_and_bit) /* - * This is a common helper function for find_next_bit and - * find_next_zero_bit. The difference is the "invert" argument, which - * is XORed with each fetched word before searching it for one bits. + * This is a common helper function for find_next_bit, find_next_zero_bit, and + * find_next_and_bit. The differences are: + * - The "invert" argument, which is XORed with each fetched word before + * searching it for one bits. + * - The optional "addr2", which is anded with "addr1" if present. */ -static unsigned long _find_next_bit(const unsigned long *addr, - unsigned long nbits, unsigned long start, unsigned long invert) +static inline unsigned long _find_next_bit(const unsigned long *addr1, + const unsigned long *addr2, unsigned long nbits, + unsigned long start, unsigned long invert) { unsigned long tmp; if (unlikely(start >= nbits)) return nbits; - tmp = addr[start / BITS_PER_LONG] ^ invert; + tmp = addr1[start / BITS_PER_LONG]; + if (addr2) + tmp &= addr2[start / BITS_PER_LONG]; + tmp ^= invert; /* Handle 1st word. */ tmp &= BITMAP_FIRST_WORD_MASK(start); @@ -47,7 +54,10 @@ static unsigned long _find_next_bit(const unsigned long *addr, if (start >= nbits) return nbits; - tmp = addr[start / BITS_PER_LONG] ^ invert; + tmp = addr1[start / BITS_PER_LONG]; + if (addr2) + tmp &= addr2[start / BITS_PER_LONG]; + tmp ^= invert; } return min(start + __ffs(tmp), nbits); @@ -61,7 +71,7 @@ static unsigned long _find_next_bit(const unsigned long *addr, unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset) { - return _find_next_bit(addr, size, offset, 0UL); + return _find_next_bit(addr, NULL, size, offset, 0UL); } EXPORT_SYMBOL(find_next_bit); #endif @@ -70,11 +80,21 @@ EXPORT_SYMBOL(find_next_bit); unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset) { - return _find_next_bit(addr, size, offset, ~0UL); + return _find_next_bit(addr, NULL, size, offset, ~0UL); } EXPORT_SYMBOL(find_next_zero_bit); #endif +#if !defined(find_next_and_bit) +unsigned long find_next_and_bit(const unsigned long *addr1, + const unsigned long *addr2, unsigned long size, + unsigned long offset) +{ + return _find_next_bit(addr1, addr2, size, offset, 0UL); +} +EXPORT_SYMBOL(find_next_and_bit); +#endif + #ifndef find_first_bit /* * Find the first set bit in a memory region. @@ -146,15 +166,19 @@ static inline unsigned long ext2_swab(const unsigned long y) } #if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le) -static unsigned long _find_next_bit_le(const unsigned long *addr, - unsigned long nbits, unsigned long start, unsigned long invert) +static inline unsigned long _find_next_bit_le(const unsigned long *addr1, + const unsigned long *addr2, unsigned long nbits, + unsigned long start, unsigned long invert) { unsigned long tmp; if (unlikely(start >= nbits)) return nbits; - tmp = addr[start / BITS_PER_LONG] ^ invert; + tmp = addr1[start / BITS_PER_LONG]; + if (addr2) + tmp &= addr2[start / BITS_PER_LONG]; + tmp ^= invert; /* Handle 1st word. 
*/ tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start)); @@ -165,7 +189,10 @@ static unsigned long _find_next_bit_le(const unsigned long *addr, if (start >= nbits) return nbits; - tmp = addr[start / BITS_PER_LONG] ^ invert; + tmp = addr1[start / BITS_PER_LONG]; + if (addr2) + tmp &= addr2[start / BITS_PER_LONG]; + tmp ^= invert; } return min(start + __ffs(ext2_swab(tmp)), nbits); @@ -176,7 +203,7 @@ static unsigned long _find_next_bit_le(const unsigned long *addr, unsigned long find_next_zero_bit_le(const void *addr, unsigned long size, unsigned long offset) { - return _find_next_bit_le(addr, size, offset, ~0UL); + return _find_next_bit_le(addr, NULL, size, offset, ~0UL); } EXPORT_SYMBOL(find_next_zero_bit_le); #endif @@ -185,7 +212,7 @@ EXPORT_SYMBOL(find_next_zero_bit_le); unsigned long find_next_bit_le(const void *addr, unsigned long size, unsigned long offset) { - return _find_next_bit_le(addr, size, offset, 0UL); + return _find_next_bit_le(addr, NULL, size, offset, 0UL); } EXPORT_SYMBOL(find_next_bit_le); #endif diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c index 67b19233c28f..5985a25e6cbc 100644 --- a/lib/find_bit_benchmark.c +++ b/lib/find_bit_benchmark.c @@ -35,6 +35,7 @@ #define SPARSE 500 static DECLARE_BITMAP(bitmap, BITMAP_LEN) __initdata; +static DECLARE_BITMAP(bitmap2, BITMAP_LEN) __initdata; /* * This is Schlemiel the Painter's algorithm. It should be called after @@ -103,6 +104,22 @@ static int __init test_find_last_bit(const void *bitmap, unsigned long len) return 0; } +static int __init test_find_next_and_bit(const void *bitmap, + const void *bitmap2, unsigned long len) +{ + unsigned long i, cnt; + cycles_t cycles; + + cycles = get_cycles(); + for (cnt = i = 0; i < BITMAP_LEN; cnt++) + i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i+1); + cycles = get_cycles() - cycles; + pr_err("find_next_and_bit:\t\t%llu cycles, %ld iterations\n", + (u64)cycles, cnt); + + return 0; +} + static int __init find_bit_test(void) { unsigned long nbits = BITMAP_LEN / SPARSE; @@ -110,23 +127,29 @@ static int __init find_bit_test(void) pr_err("\nStart testing find_bit() with random-filled bitmap\n"); get_random_bytes(bitmap, sizeof(bitmap)); + get_random_bytes(bitmap2, sizeof(bitmap2)); test_find_next_bit(bitmap, BITMAP_LEN); test_find_next_zero_bit(bitmap, BITMAP_LEN); test_find_last_bit(bitmap, BITMAP_LEN); test_find_first_bit(bitmap, BITMAP_LEN); + test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN); pr_err("\nStart testing find_bit() with sparse bitmap\n"); bitmap_zero(bitmap, BITMAP_LEN); + bitmap_zero(bitmap2, BITMAP_LEN); - while (nbits--) + while (nbits--) { __set_bit(prandom_u32() % BITMAP_LEN, bitmap); + __set_bit(prandom_u32() % BITMAP_LEN, bitmap2); + } test_find_next_bit(bitmap, BITMAP_LEN); test_find_next_zero_bit(bitmap, BITMAP_LEN); test_find_last_bit(bitmap, BITMAP_LEN); test_find_first_bit(bitmap, BITMAP_LEN); + test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN); /* * Everything is OK. 
Return error just to let user run benchmark diff --git a/tools/include/asm-generic/bitops/find.h b/tools/include/asm-generic/bitops/find.h index 9311fadaaab2..16ed1982cb34 100644 --- a/tools/include/asm-generic/bitops/find.h +++ b/tools/include/asm-generic/bitops/find.h @@ -16,6 +16,22 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset); #endif +#ifndef find_next_and_bit +/** + * find_next_and_bit - find the next set bit in both memory regions + * @addr1: The first address to base the search on + * @addr2: The second address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + * + * Returns the bit number for the next set bit + * If no bits are set, returns @size. + */ +extern unsigned long find_next_and_bit(const unsigned long *addr1, + const unsigned long *addr2, unsigned long size, + unsigned long offset); +#endif + #ifndef find_next_zero_bit /** diff --git a/tools/lib/find_bit.c b/tools/lib/find_bit.c index 42c15f906aac..a88bd507091e 100644 --- a/tools/lib/find_bit.c +++ b/tools/lib/find_bit.c @@ -22,22 +22,29 @@ #include #include -#if !defined(find_next_bit) +#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \ + !defined(find_next_and_bit) /* - * This is a common helper function for find_next_bit and - * find_next_zero_bit. The difference is the "invert" argument, which - * is XORed with each fetched word before searching it for one bits. + * This is a common helper function for find_next_bit, find_next_zero_bit, and + * find_next_and_bit. The differences are: + * - The "invert" argument, which is XORed with each fetched word before + * searching it for one bits. + * - The optional "addr2", which is anded with "addr1" if present. */ -static unsigned long _find_next_bit(const unsigned long *addr, - unsigned long nbits, unsigned long start, unsigned long invert) +static inline unsigned long _find_next_bit(const unsigned long *addr1, + const unsigned long *addr2, unsigned long nbits, + unsigned long start, unsigned long invert) { unsigned long tmp; if (unlikely(start >= nbits)) return nbits; - tmp = addr[start / BITS_PER_LONG] ^ invert; + tmp = addr1[start / BITS_PER_LONG]; + if (addr2) + tmp &= addr2[start / BITS_PER_LONG]; + tmp ^= invert; /* Handle 1st word. 
*/ tmp &= BITMAP_FIRST_WORD_MASK(start); @@ -48,7 +55,10 @@ static unsigned long _find_next_bit(const unsigned long *addr, if (start >= nbits) return nbits; - tmp = addr[start / BITS_PER_LONG] ^ invert; + tmp = addr1[start / BITS_PER_LONG]; + if (addr2) + tmp &= addr2[start / BITS_PER_LONG]; + tmp ^= invert; } return min(start + __ffs(tmp), nbits); @@ -62,7 +72,7 @@ static unsigned long _find_next_bit(const unsigned long *addr, unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset) { - return _find_next_bit(addr, size, offset, 0UL); + return _find_next_bit(addr, NULL, size, offset, 0UL); } #endif @@ -104,6 +114,15 @@ unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset) { - return _find_next_bit(addr, size, offset, ~0UL); + return _find_next_bit(addr, NULL, size, offset, ~0UL); +} +#endif + +#ifndef find_next_and_bit +unsigned long find_next_and_bit(const unsigned long *addr1, + const unsigned long *addr2, unsigned long size, + unsigned long offset) +{ + return _find_next_bit(addr1, addr2, size, offset, 0UL); } #endif -- cgit v1.2.3 From 2ee0826085d1c0281cb60c1f4bc3e0c27efeedc3 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Tue, 6 Feb 2018 15:40:17 -0800 Subject: pids: introduce find_get_task_by_vpid() helper There are several functions that do find_task_by_vpid() followed by get_task_struct(). We can use a helper function instead. Link: http://lkml.kernel.org/r/1509602027-11337-1-git-send-email-rppt@linux.vnet.ibm.com Signed-off-by: Mike Rapoport Acked-by: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/kernel/perfmon.c | 13 +++---------- include/linux/sched.h | 5 +++++ kernel/futex.c | 20 +------------------- kernel/pid.c | 13 +++++++++++++ kernel/ptrace.c | 27 ++++++--------------------- kernel/taskstats.c | 6 +----- mm/process_vm_access.c | 6 +----- security/yama/yama_lsm.c | 11 +++-------- 8 files changed, 33 insertions(+), 68 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index c44f002e8f6b..858602494096 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2610,17 +2610,10 @@ pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task) if (pid < 2) return -EPERM; if (pid != task_pid_vnr(current)) { - - read_lock(&tasklist_lock); - - p = find_task_by_vpid(pid); - /* make sure task cannot go away while we operate on it */ - if (p) get_task_struct(p); - - read_unlock(&tasklist_lock); - - if (p == NULL) return -ESRCH; + p = find_get_task_by_vpid(pid); + if (!p) + return -ESRCH; } ret = pfm_task_incompatible(ctx, p); diff --git a/include/linux/sched.h b/include/linux/sched.h index 166144c04ef6..ce5a27304b03 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1489,6 +1489,11 @@ static inline struct thread_info *task_thread_info(struct task_struct *task) extern struct task_struct *find_task_by_vpid(pid_t nr); extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); +/* + * find a task by its virtual pid and get the task struct + */ +extern struct task_struct *find_get_task_by_vpid(pid_t nr); + extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); extern void wake_up_new_task(struct task_struct *tsk); diff --git a/kernel/futex.c b/kernel/futex.c index 7f719d110908..1f450e092c74 100644 
--- a/kernel/futex.c +++ b/kernel/futex.c @@ -862,24 +862,6 @@ static void put_pi_state(struct futex_pi_state *pi_state) } } -/* - * Look up the task based on what TID userspace gave us. - * We dont trust it. - */ -static struct task_struct *futex_find_get_task(pid_t pid) -{ - struct task_struct *p; - - rcu_read_lock(); - p = find_task_by_vpid(pid); - if (p) - get_task_struct(p); - - rcu_read_unlock(); - - return p; -} - #ifdef CONFIG_FUTEX_PI /* @@ -1183,7 +1165,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, */ if (!pid) return -ESRCH; - p = futex_find_get_task(pid); + p = find_get_task_by_vpid(pid); if (!p) return -ESRCH; diff --git a/kernel/pid.c b/kernel/pid.c index 5d30c87e3c42..ed6c343fe50d 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -343,6 +343,19 @@ struct task_struct *find_task_by_vpid(pid_t vnr) return find_task_by_pid_ns(vnr, task_active_pid_ns(current)); } +struct task_struct *find_get_task_by_vpid(pid_t nr) +{ + struct task_struct *task; + + rcu_read_lock(); + task = find_task_by_vpid(nr); + if (task) + get_task_struct(task); + rcu_read_unlock(); + + return task; +} + struct pid *get_task_pid(struct task_struct *task, enum pid_type type) { struct pid *pid; diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 5e1d713c8e61..21fec73d45d4 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -1103,21 +1103,6 @@ int ptrace_request(struct task_struct *child, long request, return ret; } -static struct task_struct *ptrace_get_task_struct(pid_t pid) -{ - struct task_struct *child; - - rcu_read_lock(); - child = find_task_by_vpid(pid); - if (child) - get_task_struct(child); - rcu_read_unlock(); - - if (!child) - return ERR_PTR(-ESRCH); - return child; -} - #ifndef arch_ptrace_attach #define arch_ptrace_attach(child) do { } while (0) #endif @@ -1135,9 +1120,9 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, goto out; } - child = ptrace_get_task_struct(pid); - if (IS_ERR(child)) { - ret = PTR_ERR(child); + child = find_get_task_by_vpid(pid); + if (!child) { + ret = -ESRCH; goto out; } @@ -1281,9 +1266,9 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid, goto out; } - child = ptrace_get_task_struct(pid); - if (IS_ERR(child)) { - ret = PTR_ERR(child); + child = find_get_task_by_vpid(pid); + if (!child) { + ret = -ESRCH; goto out; } diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 4559e914452b..4e62a4a8fa91 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -194,11 +194,7 @@ static int fill_stats_for_pid(pid_t pid, struct taskstats *stats) { struct task_struct *tsk; - rcu_read_lock(); - tsk = find_task_by_vpid(pid); - if (tsk) - get_task_struct(tsk); - rcu_read_unlock(); + tsk = find_get_task_by_vpid(pid); if (!tsk) return -ESRCH; fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats); diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index 8973cd231ece..16424b9ae424 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c @@ -197,11 +197,7 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter, } /* Get process information */ - rcu_read_lock(); - task = find_task_by_vpid(pid); - if (task) - get_task_struct(task); - rcu_read_unlock(); + task = find_get_task_by_vpid(pid); if (!task) { rc = -ESRCH; goto free_proc_pages; diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index 8298e094f4f7..ffda91a4a1aa 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -250,15 +250,10 @@ int yama_task_prctl(int option, unsigned long 
arg2, unsigned long arg3, } else { struct task_struct *tracer; - rcu_read_lock(); - tracer = find_task_by_vpid(arg2); - if (tracer) - get_task_struct(tracer); - else + tracer = find_get_task_by_vpid(arg2); + if (!tracer) { rc = -EINVAL; - rcu_read_unlock(); - - if (tracer) { + } else { rc = yama_ptracer_add(tracer, myself); put_task_struct(tracer); } -- cgit v1.2.3 From b0f7e32ca1a8748e8f7f355c9e31c450c2bfad2f Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Tue, 6 Feb 2018 15:40:56 -0800 Subject: arch/score/kernel/setup.c: combine two seq_printf() calls into one call in show_cpuinfo() Some data were printed into a sequence by two separate function calls. Print the same data by a single function call instead. This issue was detected by using the Coccinelle software. Link: http://lkml.kernel.org/r/ddcfff3a-9502-6ce0-b08a-365eb55ce958@users.sourceforge.net Signed-off-by: Markus Elfring Cc: Chen Liqin Cc: Lennox Wu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/score/kernel/setup.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/score/kernel/setup.c b/arch/score/kernel/setup.c index f3a0649ab521..627416bbd0b1 100644 --- a/arch/score/kernel/setup.c +++ b/arch/score/kernel/setup.c @@ -124,9 +124,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) { unsigned long n = (unsigned long) v - 1; - seq_printf(m, "processor\t\t: %ld\n", n); - seq_printf(m, "\n"); - + seq_printf(m, "processor\t\t: %ld\n\n", n); return 0; } -- cgit v1.2.3
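A closing note on the find_get_task_by_vpid() helper introduced above: every converted call site (perfmon, futex, ptrace, taskstats, process_vm_access, yama) follows the same reference-counting pattern. A minimal sketch of a hypothetical caller, not a function added by this series:

```
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>

/* Hypothetical example caller -- illustrates the pattern only. */
static int frob_task_by_vpid(pid_t pid)
{
	struct task_struct *task;
	int ret = 0;

	task = find_get_task_by_vpid(pid);	/* lookup + get_task_struct() */
	if (!task)
		return -ESRCH;

	/* ... operate on @task while the reference is held ... */

	put_task_struct(task);
	return ret;
}
```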