author     Linus Torvalds <torvalds@linux-foundation.org>  2022-08-08 03:52:35 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-08-08 03:52:35 +0300
commit     4e23eeebb2e57f5a28b36221aa776b5a1122dde5 (patch)
tree       f8376df9b40f35576533e830f8c2a1b7e981c160 /lib
parent     3bc1bc0b59d04e997db25b84babf459ca1cd80b7 (diff)
parent     36d4b36b69590fed99356a4426c940a253a93800 (diff)
download   linux-4e23eeebb2e57f5a28b36221aa776b5a1122dde5.tar.xz
Merge tag 'bitmap-6.0-rc1' of https://github.com/norov/linux
Pull bitmap updates from Yury Norov:
- fix the duplicated comments on bitmap_to_arr64() (Qu Wenruo)
- optimize out non-atomic bitops on compile-time constants (Alexander
Lobakin)
- cleanup bitmap-related headers (Yury Norov)
- x86/olpc: fix 'logical not is only applied to the left hand side'
(Alexander Lobakin)
- lib/nodemask: inline wrappers around bitmap (Yury Norov)
* tag 'bitmap-6.0-rc1' of https://github.com/norov/linux: (26 commits)
lib/nodemask: inline next_node_in() and node_random()
powerpc: drop dependency on <asm/machdep.h> in archrandom.h
x86/olpc: fix 'logical not is only applied to the left hand side'
lib/cpumask: move some one-line wrappers to header file
headers/deps: mm: align MANITAINERS and Docs with new gfp.h structure
headers/deps: mm: Split <linux/gfp_types.h> out of <linux/gfp.h>
headers/deps: mm: Optimize <linux/gfp.h> header dependencies
lib/cpumask: move trivial wrappers around find_bit to the header
lib/cpumask: change return types to unsigned where appropriate
cpumask: change return types to bool where appropriate
lib/bitmap: change type of bitmap_weight to unsigned long
lib/bitmap: change return types to bool where appropriate
arm: align find_bit declarations with generic kernel
iommu/vt-d: avoid invalid memory access via node_online(NUMA_NO_NODE)
lib/test_bitmap: test the tail after bitmap_to_arr64()
lib/bitmap: fix off-by-one in bitmap_to_arr64()
lib: test_bitmap: add compile-time optimization/evaluations assertions
bitmap: don't assume compiler evaluates small mem*() builtins calls
net/ice: fix initializing the bitmap in the switch code
bitops: let optimize out non-atomic bitops on compile-time constants
...
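The "optimize out non-atomic bitops on compile-time constants" entries above hinge on one idea: when both the bit number and the bitmap contents are known at build time, dispatch to a plain-C helper that the compiler can constant-fold, and only otherwise fall back to the arch-optimized runtime op (the const_test_bit() referenced in the new test in the diff below is such a helper). The following standalone C sketch illustrates that dispatch pattern only; my_test_bit(), my_const_test_bit() and my_arch_test_bit() are illustrative names, not the kernel's macros, and __builtin_constant_p() assumes GCC or Clang.

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Plain-C variant that the optimizer can evaluate at compile time. */
static inline bool my_const_test_bit(unsigned long nr, const unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

/* Stand-in for the arch-optimized runtime variant (volatile access). */
static inline bool my_arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

/*
 * Dispatch: if the bit number and the pointed-to word are both
 * compile-time constants, take the const-foldable path so the whole
 * expression becomes a constant; otherwise use the runtime op.
 */
#define my_test_bit(nr, addr)						\
	((__builtin_constant_p(nr) &&					\
	  __builtin_constant_p(*(const unsigned long *)(addr))) ?	\
	 my_const_test_bit(nr, addr) : my_arch_test_bit(nr, addr))

int main(void)
{
	static const unsigned long known[1] = { 1UL << 5 };	/* constant contents */
	unsigned long dynamic[1] = { 0 };

	dynamic[0] |= 1UL << 3;

	/* Typically folds to the constant 1 at -O2: both operands are known. */
	printf("bit 5 of known:   %d\n", (int)my_test_bit(5, known));
	/* Takes the runtime path: dynamic[] is modified at run time. */
	printf("bit 3 of dynamic: %d\n", (int)my_test_bit(3, dynamic));
	return 0;
}

Where the fold fails, the kernel escalates to a build error via BUILD_BUG_ON(), as the new test_bitmap_const_eval() in the diff below does; the sketch simply prints the results instead.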
Diffstat (limited to 'lib')
-rw-r--r--  lib/Makefile       2
-rw-r--r--  lib/bitmap.c      11
-rw-r--r--  lib/cpumask.c     97
-rw-r--r--  lib/nodemask.c     8
-rw-r--r--  lib/test_bitmap.c 68
5 files changed, 81 insertions, 105 deletions
diff --git a/lib/Makefile b/lib/Makefile
index 17e48da223e2..c95212141928 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -33,7 +33,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
 	 earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
-	 nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o \
+	 nmi_backtrace.o win_minmax.o memcat_p.o \
 	 buildid.o cpumask.o
 
 lib-$(CONFIG_PRINTK) += dump_stack.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index b18e31ea6e66..488e6c3e5acc 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -237,7 +237,7 @@ void bitmap_cut(unsigned long *dst, const unsigned long *src,
 }
 EXPORT_SYMBOL(bitmap_cut);
 
-int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
 				const unsigned long *bitmap2, unsigned int bits)
 {
 	unsigned int k;
@@ -275,7 +275,7 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
 }
 EXPORT_SYMBOL(__bitmap_xor);
 
-int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
 				const unsigned long *bitmap2, unsigned int bits)
 {
 	unsigned int k;
@@ -333,10 +333,9 @@ bool __bitmap_subset(const unsigned long *bitmap1,
 }
 EXPORT_SYMBOL(__bitmap_subset);
 
-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
+unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
 {
-	unsigned int k, lim = bits/BITS_PER_LONG;
-	int w = 0;
+	unsigned int k, lim = bits/BITS_PER_LONG, w = 0;
 
 	for (k = 0; k < lim; k++)
 		w += hweight_long(bitmap[k]);
@@ -1564,7 +1563,7 @@ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits)
 
 	/* Clear tail bits in the last element of array beyond nbits. */
 	if (nbits % 64)
-		buf[-1] &= GENMASK_ULL(nbits % 64, 0);
+		buf[-1] &= GENMASK_ULL((nbits - 1) % 64, 0);
 }
 EXPORT_SYMBOL(bitmap_to_arr64);
 #endif
diff --git a/lib/cpumask.c b/lib/cpumask.c
index b9728513a4d4..8baeb37e23d3 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -8,61 +8,6 @@
 #include <linux/numa.h>
 
 /**
- * cpumask_next - get the next cpu in a cpumask
- * @n: the cpu prior to the place to search (ie. return will be > @n)
- * @srcp: the cpumask pointer
- *
- * Returns >= nr_cpu_ids if no further cpus set.
- */
-unsigned int cpumask_next(int n, const struct cpumask *srcp)
-{
-	/* -1 is a legal arg here. */
-	if (n != -1)
-		cpumask_check(n);
-	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
-}
-EXPORT_SYMBOL(cpumask_next);
-
-/**
- * cpumask_next_and - get the next cpu in *src1p & *src2p
- * @n: the cpu prior to the place to search (ie. return will be > @n)
- * @src1p: the first cpumask pointer
- * @src2p: the second cpumask pointer
- *
- * Returns >= nr_cpu_ids if no further cpus set in both.
- */
-int cpumask_next_and(int n, const struct cpumask *src1p,
-		     const struct cpumask *src2p)
-{
-	/* -1 is a legal arg here. */
-	if (n != -1)
-		cpumask_check(n);
-	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
-		nr_cpumask_bits, n + 1);
-}
-EXPORT_SYMBOL(cpumask_next_and);
-
-/**
- * cpumask_any_but - return a "random" in a cpumask, but not this one.
- * @mask: the cpumask to search
- * @cpu: the cpu to ignore.
- *
- * Often used to find any cpu but smp_processor_id() in a mask.
- * Returns >= nr_cpu_ids if no cpus set.
- */
-int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
-{
-	unsigned int i;
-
-	cpumask_check(cpu);
-	for_each_cpu(i, mask)
-		if (i != cpu)
-			break;
-	return i;
-}
-EXPORT_SYMBOL(cpumask_any_but);
-
-/**
  * cpumask_next_wrap - helper to implement for_each_cpu_wrap
  * @n: the cpu prior to the place to search
  * @mask: the cpumask pointer
@@ -74,9 +19,9 @@ EXPORT_SYMBOL(cpumask_any_but);
  * Note: the @wrap argument is required for the start condition when
  * we cannot assume @start is set in @mask.
  */
-int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
 {
-	int next;
+	unsigned int next;
 
 again:
 	next = cpumask_next(n, mask);
@@ -125,34 +70,6 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
 }
 EXPORT_SYMBOL(alloc_cpumask_var_node);
 
-bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
-{
-	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
-}
-EXPORT_SYMBOL(zalloc_cpumask_var_node);
-
-/**
- * alloc_cpumask_var - allocate a struct cpumask
- * @mask: pointer to cpumask_var_t where the cpumask is returned
- * @flags: GFP_ flags
- *
- * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
- * a nop returning a constant 1 (in <linux/cpumask.h>).
- *
- * See alloc_cpumask_var_node.
- */
-bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
-{
-	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
-}
-EXPORT_SYMBOL(alloc_cpumask_var);
-
-bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
-{
-	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
-}
-EXPORT_SYMBOL(zalloc_cpumask_var);
-
 /**
  * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
  * @mask: pointer to cpumask_var_t where the cpumask is returned
@@ -206,7 +123,7 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
  */
 unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	int cpu;
+	unsigned int cpu;
 
 	/* Wrap: we always want a cpu. */
 	i %= num_online_cpus();
@@ -244,10 +161,10 @@ static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
-int cpumask_any_and_distribute(const struct cpumask *src1p,
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
 			       const struct cpumask *src2p)
 {
-	int next, prev;
+	unsigned int next, prev;
 
 	/* NOTE: our first selection will skip 0. */
 	prev = __this_cpu_read(distribute_cpu_mask_prev);
@@ -263,9 +180,9 @@ int cpumask_any_and_distribute(const struct cpumask *src1p,
 }
 EXPORT_SYMBOL(cpumask_any_and_distribute);
 
-int cpumask_any_distribute(const struct cpumask *srcp)
+unsigned int cpumask_any_distribute(const struct cpumask *srcp)
 {
-	int next, prev;
+	unsigned int next, prev;
 
 	/* NOTE: our first selection will skip 0. */
 	prev = __this_cpu_read(distribute_cpu_mask_prev);
diff --git a/lib/nodemask.c b/lib/nodemask.c
index e22647f5181b..b8a433d16b51 100644
--- a/lib/nodemask.c
+++ b/lib/nodemask.c
@@ -3,14 +3,6 @@
 #include <linux/module.h>
 #include <linux/random.h>
 
-unsigned int __next_node_in(int node, const nodemask_t *srcp)
-{
-	unsigned int ret = __next_node(node, srcp);
-
-	if (ret == MAX_NUMNODES)
-		ret = __first_node(srcp);
-	return ret;
-}
 EXPORT_SYMBOL(__next_node_in);
 
 #ifdef CONFIG_NUMA
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index d5923a640457..98754ff9fe68 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -604,6 +604,12 @@ static void __init test_bitmap_arr64(void)
 			pr_err("bitmap_copy_arr64(nbits == %d:"
 				" tail is not safely cleared: %d\n", nbits, next_bit);
 
+		if ((nbits % 64) &&
+		    (arr[(nbits - 1) / 64] & ~GENMASK_ULL((nbits - 1) % 64, 0)))
+			pr_err("bitmap_to_arr64(nbits == %d): tail is not safely cleared: 0x%016llx (must be 0x%016llx)\n",
+				nbits, arr[(nbits - 1) / 64],
+				GENMASK_ULL((nbits - 1) % 64, 0));
+
 		if (nbits < EXP1_IN_BITS - 64)
 			expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5);
 	}
@@ -869,6 +875,67 @@ static void __init test_bitmap_print_buf(void)
 	}
 }
 
+static void __init test_bitmap_const_eval(void)
+{
+	DECLARE_BITMAP(bitmap, BITS_PER_LONG);
+	unsigned long initvar = BIT(2);
+	unsigned long bitopvar = 0;
+	unsigned long var = 0;
+	int res;
+
+	/*
+	 * Compilers must be able to optimize all of those to compile-time
+	 * constants on any supported optimization level (-O2, -Os) and any
+	 * architecture. Otherwise, trigger a build bug.
+	 * The whole function gets optimized out then, there's nothing to do
+	 * in runtime.
+	 */
+
+	/*
+	 * Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }`.
+	 * Clang on s390 optimizes bitops at compile-time as intended, but at
+	 * the same time stops treating @bitmap and @bitopvar as compile-time
+	 * constants after regular test_bit() is executed, thus triggering the
+	 * build bugs below. So, call const_test_bit() there directly until
+	 * the compiler is fixed.
+	 */
+	bitmap_clear(bitmap, 0, BITS_PER_LONG);
+#if defined(__s390__) && defined(__clang__)
+	if (!const_test_bit(7, bitmap))
+#else
+	if (!test_bit(7, bitmap))
+#endif
+		bitmap_set(bitmap, 5, 2);
+
+	/* Equals to `unsigned long bitopvar = BIT(20)` */
+	__change_bit(31, &bitopvar);
+	bitmap_shift_right(&bitopvar, &bitopvar, 11, BITS_PER_LONG);
+
+	/* Equals to `unsigned long var = BIT(25)` */
+	var |= BIT(25);
+	if (var & BIT(0))
+		var ^= GENMASK(9, 6);
+
+	/* __const_hweight<32|64>(GENMASK(6, 5)) == 2 */
+	res = bitmap_weight(bitmap, 20);
+	BUILD_BUG_ON(!__builtin_constant_p(res));
+	BUILD_BUG_ON(res != 2);
+
+	/* !(BIT(31) & BIT(18)) == 1 */
+	res = !test_bit(18, &bitopvar);
+	BUILD_BUG_ON(!__builtin_constant_p(res));
+	BUILD_BUG_ON(!res);
+
+	/* BIT(2) & GENMASK(14, 8) == 0 */
+	res = initvar & GENMASK(14, 8);
+	BUILD_BUG_ON(!__builtin_constant_p(res));
+	BUILD_BUG_ON(res);
+
+	/* ~BIT(25) */
+	BUILD_BUG_ON(!__builtin_constant_p(~var));
+	BUILD_BUG_ON(~var != ~BIT(25));
+}
+
 static void __init selftest(void)
 {
 	test_zero_clear();
@@ -884,6 +951,7 @@ static void __init selftest(void)
 	test_for_each_set_clump8();
 	test_bitmap_cut();
 	test_bitmap_print_buf();
+	test_bitmap_const_eval();
 }
 
 KSTM_MODULE_LOADERS(test_bitmap);
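Regarding the lib/bitmap.c hunk above: bitmap_to_arr64() must clear everything past bit nbits-1 in the last u64 of the output, but the old mask GENMASK_ULL(nbits % 64, 0) spans one bit too many (for nbits == 1 it keeps bits 1..0 instead of bit 0 alone), which is exactly the leak the new check in lib/test_bitmap.c looks for. A minimal userspace sketch of the arithmetic, assuming nothing beyond standard C plus a local GENMASK_ULL stand-in:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in with the same shape as the kernel's GENMASK_ULL(h, l). */
#define GENMASK_ULL(h, l)	((~0ULL >> (63 - (h))) & (~0ULL << (l)))

int main(void)
{
	for (unsigned int nbits = 1; nbits <= 3; nbits++) {
		uint64_t old_mask = GENMASK_ULL(nbits % 64, 0);		/* before the fix */
		uint64_t new_mask = GENMASK_ULL((nbits - 1) % 64, 0);	/* after the fix */

		/*
		 * Only bits [nbits-1:0] are valid; the old mask also keeps
		 * bit 'nbits', so a dirty tail bit survived in the output.
		 */
		printf("nbits=%u  old=0x%" PRIx64 "  new=0x%" PRIx64 "\n",
		       nbits, old_mask, new_mask);
	}
	return 0;
}

For nbits being an exact multiple of 64 no masking is needed at all, and that case is already skipped by the `if (nbits % 64)` guard kept by the fix.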