author		Linus Torvalds <torvalds@linux-foundation.org>	2022-10-17 01:27:07 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-10-17 01:27:07 +0300
commit		f1947d7c8a61db1cb0ef909a6512ede0b1f2115b
tree		bbe7f785243bb692f243d08de8bc5ef4a82454d6 /mm
parent		8636df94ec917019c4cb744ba0a1f94cf9057790
parent		de492c83cae0af72de370b9404aacda93dafcad5
download	linux-f1947d7c8a61db1cb0ef909a6512ede0b1f2115b.tar.xz
Merge tag 'random-6.1-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random
Pull more random number generator updates from Jason Donenfeld:
"This time with some large scale treewide cleanups.
The intent of this pull is to clean up the way callers fetch random
integers. The current rules for doing this right are:
- If you want a secure or an insecure random u64, use get_random_u64()
- If you want a secure or an insecure random u32, use get_random_u32()
The old function prandom_u32() has been deprecated for a while
now and is just a wrapper around get_random_u32(). Same for
get_random_int().
- If you want a secure or an insecure random u16, use get_random_u16()
- If you want a secure or an insecure random u8, use get_random_u8()
- If you want secure or insecure random bytes, use get_random_bytes().
The old function prandom_bytes() has been deprecated for a while
now and has long been a wrapper around get_random_bytes()
- If you want a non-uniform random u32, u16, or u8 bounded by a
certain open interval maximum, use prandom_u32_max()
I say "non-uniform", because it doesn't do any rejection sampling
or divisions. Hence, it stays within the prandom_*() namespace, not
the get_random_*() namespace.
I'm currently investigating a "uniform" function for 6.2. We'll see
what comes of that.
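A concrete sketch of the bounding trick described in the last rule may
help. This is an illustration only: the function name
bounded_random_sketch() is hypothetical, and this is not the kernel's
actual source, just the multiply-shift idea behind prandom_u32_max().

#include <linux/types.h>
#include <linux/random.h>

/*
 * Hypothetical sketch, not the kernel's exact source: map a 32-bit
 * random word into the half-open interval [0, bound) with one
 * multiply and one shift, avoiding division entirely.  Unless bound
 * divides 2^32 evenly, some results come up slightly more often than
 * others -- there is no rejection sampling, hence "non-uniform".
 */
static inline u32 bounded_random_sketch(u32 bound)
{
        return (u32)(((u64)get_random_u32() * bound) >> 32);
}

Called with bound == 1024, for example, this yields a value in
0..1023; callers that want 1..1024, like the kasan test hunks further
down, simply add 1.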
By applying these rules uniformly, we get several benefits:

 - By using prandom_u32_max() with an upper-bound that the compiler
   can prove at compile-time is ≤ 65536 or ≤ 256, internally
   get_random_u16() or get_random_u8() is used, which wastes fewer
   batched random bytes, and hence has higher throughput (see the
   sketch after this list).

 - By using prandom_u32_max() instead of %, when the upper-bound is
   not a constant, division is still avoided, because
   prandom_u32_max() uses a faster multiplication-based trick instead.

 - By using get_random_u16() or get_random_u8() in cases where the
   return value is intended to indeed be a u16 or a u8, we waste fewer
   batched random bytes, and hence have higher throughput.
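The compile-time narrowing in the first benefit can be sketched along
the same lines. Again the function name is hypothetical and this is
not the kernel's exact source; it only assumes the
get_random_u8()/get_random_u16()/get_random_u32() helpers named above.

#include <linux/types.h>
#include <linux/random.h>

/*
 * Hypothetical sketch of constant-bound narrowing: when the compiler
 * can prove at build time that the bound fits in 8 or 16 bits, draw
 * only one or two bytes from the batched entropy instead of four.
 */
static inline u32 bounded_random_narrowed_sketch(u32 bound)
{
        if (__builtin_constant_p(bound <= 256U) && bound <= 256U)
                return (get_random_u8() * bound) >> 8;   /* one byte */
        if (__builtin_constant_p(bound <= 65536U) && bound <= 65536U)
                return (get_random_u16() * bound) >> 16; /* two bytes */
        return (u32)(((u64)get_random_u32() * bound) >> 32); /* four bytes */
}

With a constant bound such as 1024, both checks resolve at build time,
the 16-bit path is chosen, and each call consumes two batched random
bytes rather than four.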
This series was originally done by hand while I was on an airplane
without Internet. Later, Kees and I worked on retroactively figuring
out what could be done with Coccinelle and what had to be done
manually, and then we split things up based on that.

So while this touches a lot of files, the actual amount of code that's
hand fiddled is comfortably small"
* tag 'random-6.1-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random:
prandom: remove unused functions
treewide: use get_random_bytes() when possible
treewide: use get_random_u32() when possible
treewide: use get_random_{u8,u16}() when possible, part 2
treewide: use get_random_{u8,u16}() when possible, part 1
treewide: use prandom_u32_max() when possible, part 2
treewide: use prandom_u32_max() when possible, part 1
Diffstat (limited to 'mm')
-rw-r--r--	mm/kasan/kasan_test.c	| 6 +++---
-rw-r--r--	mm/shmem.c		| 2 +-
-rw-r--r--	mm/slab.c		| 2 +-
-rw-r--r--	mm/slub.c		| 2 +-

4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 57e4c72aa8bd..0d59098f0876 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -1299,7 +1299,7 @@ static void match_all_not_assigned(struct kunit *test)
         KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
 
         for (i = 0; i < 256; i++) {
-                size = (get_random_int() % 1024) + 1;
+                size = prandom_u32_max(1024) + 1;
                 ptr = kmalloc(size, GFP_KERNEL);
                 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
                 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
@@ -1308,7 +1308,7 @@ static void match_all_not_assigned(struct kunit *test)
         }
 
         for (i = 0; i < 256; i++) {
-                order = (get_random_int() % 4) + 1;
+                order = prandom_u32_max(4) + 1;
                 pages = alloc_pages(GFP_KERNEL, order);
                 ptr = page_address(pages);
                 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -1321,7 +1321,7 @@ static void match_all_not_assigned(struct kunit *test)
                 return;
 
         for (i = 0; i < 256; i++) {
-                size = (get_random_int() % 1024) + 1;
+                size = prandom_u32_max(1024) + 1;
                 ptr = vmalloc(size);
                 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
                 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
diff --git a/mm/shmem.c b/mm/shmem.c
index 86214d48dd09..8280a5cb48df 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2332,7 +2332,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir,
                 inode_init_owner(&init_user_ns, inode, dir, mode);
                 inode->i_blocks = 0;
                 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
-                inode->i_generation = prandom_u32();
+                inode->i_generation = get_random_u32();
                 info = SHMEM_I(inode);
                 memset(info, 0, (char *)inode - (char *)info);
                 spin_lock_init(&info->lock);
diff --git a/mm/slab.c b/mm/slab.c
index d1f6e2c64c2e..59c8e28f7b6a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2381,7 +2381,7 @@ static bool freelist_state_initialize(union freelist_init_state *state,
         unsigned int rand;
 
         /* Use best entropy available to define a random shift */
-        rand = get_random_int();
+        rand = get_random_u32();
 
         /* Use a random state if the pre-computed list is not available */
         if (!cachep->random_seq) {
diff --git a/mm/slub.c b/mm/slub.c
index 96dd392d7f99..157527d7101b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1881,7 +1881,7 @@ static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
                 return false;
 
         freelist_count = oo_objects(s->oo);
-        pos = get_random_int() % freelist_count;
+        pos = prandom_u32_max(freelist_count);
 
         page_limit = slab->objects * s->size;
         start = fixup_red_left(s, slab_address(slab));