author	Linus Torvalds <torvalds@linux-foundation.org>	2026-02-27 20:54:02 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2026-02-27 20:54:02 +0300
commit	3feb464fb7784d09008dce6c95f895e815ee97d0 (patch)
tree	88b211cdd902c17585de5c31522b8ee96970dfff
parent	d5a8e4be46514982c143a91549c678002b839a28 (diff)
parent	e9217ca77dc35b4978db0fe901685ddb3f1e223a (diff)
download	linux-3feb464fb7784d09008dce6c95f895e815ee97d0.tar.xz
Merge tag 'slab-for-7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab fixes from Vlastimil Babka:

 - Fix for spurious page allocation warnings on sheaf refill (Harry Yoo)

 - Fix for CONFIG_MEM_ALLOC_PROFILING_DEBUG warnings (Suren
   Baghdasaryan)

 - Fix for kernel-doc warning on ksize() (Sanjay Chitroda)

 - Fix to avoid setting slab->stride later than on slab allocation.
   Doesn't yet fix the reports from powerpc; debugging is making
   progress (Harry Yoo)

* tag 'slab-for-7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm/slab: initialize slab->stride early to avoid memory ordering issues
  mm/slub: drop duplicate kernel-doc for ksize()
  mm/slab: mark alloc tags empty for sheaves allocated with __GFP_NO_OBJ_EXT
  mm/slab: pass __GFP_NOWARN to refill_sheaf() if fallback is available
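Two of these fixes share one pattern: suppress the allocator's failure
warning only when a slower fallback will run afterwards. A minimal sketch of
that pattern, with hypothetical helpers try_refill() and slow_fallback()
standing in for the real refill_sheaf() call sites shown in the diff below:

	/*
	 * Sketch, not tree code: the opportunistic attempt gets
	 * __GFP_NOWARN because its failure is recoverable; the fallback
	 * keeps the caller's flags and may still warn legitimately.
	 */
	if (try_refill(s, sheaf, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
		/* quiet failure: no page allocation splat */
		return slow_fallback(s, gfp);
	}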
-rw-r--r--	include/linux/gfp_types.h	 2
-rw-r--r--	include/linux/slab.h	12
-rw-r--r--	mm/slab.h	 4
-rw-r--r--	mm/slub.c	51
4 files changed, 39 insertions(+), 30 deletions(-)
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 814bb2892f99..6c75df30a281 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -139,6 +139,8 @@ enum {
* %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
*
* %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension.
+ * mark_obj_codetag_empty() should be called upon freeing for objects allocated
+ * with this flag to indicate that their NULL tags are expected and normal.
*/
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
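The added sentence prescribes a pairing that the mm/slub.c hunks below
implement for sheaves. As a standalone sketch (assuming
CONFIG_MEM_ALLOC_PROFILING_DEBUG is enabled; 'size' and 'vec' are arbitrary,
not from this diff):

	/* allocation opts out of the object extension... */
	void *vec = kmalloc(size, GFP_KERNEL | __GFP_NO_OBJ_EXT);

	/* ... use vec ... */

	/* ...so the free side must declare the NULL alloc tag intentional */
	mark_obj_codetag_empty(vec);
	kfree(vec);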
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a5a5e4108ae5..15a60b501b95 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -517,18 +517,6 @@ void kfree_sensitive(const void *objp);
DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))
-/**
- * ksize - Report actual allocation size of associated object
- *
- * @objp: Pointer returned from a prior kmalloc()-family allocation.
- *
- * This should not be used for writing beyond the originally requested
- * allocation size. Either use krealloc() or round up the allocation size
- * with kmalloc_size_roundup() prior to allocation. If this is used to
- * access beyond the originally requested allocation size, UBSAN_BOUNDS
- * and/or FORTIFY_SOURCE may trip, since they only know about the
- * originally allocated size via the __alloc_size attribute.
- */
size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
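This removes the duplicate copy of the kernel-doc; per the shortlog, the
surviving copy presumably sits at the ksize() definition in mm/slub.c. Its
advice, sketched with the ordinary kmalloc API (nothing here is quoted from
this diff):

	/* round up before allocating instead of probing ksize() afterwards */
	size_t bytes = kmalloc_size_roundup(count);
	char *buf = kmalloc(bytes, GFP_KERNEL);

	/*
	 * If buf is non-NULL, all 'bytes' bytes may be written:
	 * UBSAN_BOUNDS and FORTIFY_SOURCE learned 'bytes', not 'count',
	 * via the __alloc_size attribute.
	 */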
diff --git a/mm/slab.h b/mm/slab.h
index 71c7261bf822..f6ef862b60ef 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -290,14 +290,14 @@ static inline void *nearest_obj(struct kmem_cache *cache,
/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
- void *addr, void *obj)
+ void *addr, const void *obj)
{
return reciprocal_divide(kasan_reset_tag(obj) - addr,
cache->reciprocal_size);
}
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
+ const struct slab *slab, const void *obj)
{
if (is_kfence_address(obj))
return 0;
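Constifying 'obj' here is what lets the renamed
mark_obj_codetag_empty(const void *obj) in mm/slub.c below call
obj_to_index() without casting away const. For reference, the index is
plain offset arithmetic, sketched with an ordinary division standing in for
the reciprocal (assuming cache->size is the per-object stride, as the name
reciprocal_size suggests):

	/* slot number of obj inside its slab */
	unsigned int idx = ((char *)obj - (char *)addr) / cache->size;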
diff --git a/mm/slub.c b/mm/slub.c
index 862642c165ed..0c906fefc31b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2041,18 +2041,18 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
+static inline void mark_obj_codetag_empty(const void *obj)
{
- struct slab *obj_exts_slab;
+ struct slab *obj_slab;
unsigned long slab_exts;
- obj_exts_slab = virt_to_slab(obj_exts);
- slab_exts = slab_obj_exts(obj_exts_slab);
+ obj_slab = virt_to_slab(obj);
+ slab_exts = slab_obj_exts(obj_slab);
if (slab_exts) {
get_slab_obj_exts(slab_exts);
- unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
- obj_exts_slab, obj_exts);
- struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
+ unsigned int offs = obj_to_index(obj_slab->slab_cache,
+ obj_slab, obj);
+ struct slabobj_ext *ext = slab_obj_ext(obj_slab,
slab_exts, offs);
if (unlikely(is_codetag_empty(&ext->ref))) {
@@ -2090,7 +2090,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
+static inline void mark_obj_codetag_empty(const void *obj) {}
static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
struct slabobj_ext *vec, unsigned int objects) {}
@@ -2196,7 +2196,6 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
retry:
old_exts = READ_ONCE(slab->obj_exts);
handle_failed_objexts_alloc(old_exts, vec, objects);
- slab_set_stride(slab, sizeof(struct slabobj_ext));
if (new_slab) {
/*
@@ -2211,7 +2210,7 @@ retry:
* assign slabobj_exts in parallel. In this case the existing
* objcg vector should be reused.
*/
- mark_objexts_empty(vec);
+ mark_obj_codetag_empty(vec);
if (unlikely(!allow_spin))
kfree_nolock(vec);
else
@@ -2254,7 +2253,7 @@ static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
* NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
* the extension for obj_exts is expected to be NULL.
*/
- mark_objexts_empty(obj_exts);
+ mark_obj_codetag_empty(obj_exts);
if (allow_spin)
kfree(obj_exts);
else
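Both call sites above hand mark_obj_codetag_empty() an obj_exts vector that
is itself a kmalloc'ed object, so it has its own extension slot that was
never tagged. What the helper does, reassembled from the hunk at the top of
this file (set_codetag_empty() is assumed as the counterpart of the
is_codetag_empty() check the hunk shows):

	struct slab *slab = virt_to_slab(obj);
	unsigned long exts = slab_obj_exts(slab);
	unsigned int offs = obj_to_index(slab->slab_cache, slab, obj);
	struct slabobj_ext *ext = slab_obj_ext(slab, exts, offs);

	set_codetag_empty(&ext->ref);	/* NULL ref is now expected */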
@@ -2272,6 +2271,9 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
void *addr;
unsigned long obj_exts;
+ /* Initialize stride early to avoid memory ordering issues */
+ slab_set_stride(slab, sizeof(struct slabobj_ext));
+
if (!need_slab_obj_exts(s))
return;
@@ -2288,7 +2290,6 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
obj_exts |= MEMCG_DATA_OBJEXTS;
#endif
slab->obj_exts = obj_exts;
- slab_set_stride(slab, sizeof(struct slabobj_ext));
} else if (s->flags & SLAB_OBJ_EXT_IN_OBJ) {
unsigned int offset = obj_exts_offset_in_object(s);
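Both hunks move the same line: the stride is now written once, when the slab
is set up, instead of alongside each obj_exts publication. A sketch of the
window this closes (my reading of the commit message, not quoted code):

	/*
	 * writer (old order)              reader
	 * slab->obj_exts = exts;          if (READ_ONCE(slab->obj_exts))
	 * slab_set_stride(slab, ...);             ext = base + idx * stride;
	 *
	 * A reader could observe obj_exts non-NULL while stride still held
	 * its initial value. Setting stride before the slab is visible to
	 * readers removes that window.
	 */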
@@ -2312,6 +2313,10 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
#else /* CONFIG_SLAB_OBJ_EXT */
+static inline void mark_obj_codetag_empty(const void *obj)
+{
+}
+
static inline void init_slab_obj_exts(struct slab *slab)
{
}
@@ -2783,6 +2788,15 @@ static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
{
+ /*
+ * If the sheaf was created with __GFP_NO_OBJ_EXT flag then its
+ * corresponding extension is NULL and alloc_tag_sub() will throw a
+ * warning, therefore replace NULL with CODETAG_EMPTY to indicate
+ * that the extension for this sheaf is expected to be NULL.
+ */
+ if (s->flags & SLAB_KMALLOC)
+ mark_obj_codetag_empty(sheaf);
+
kfree(sheaf);
stat(s, SHEAF_FREE);
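The SLAB_KMALLOC guard matches the allocation side: per the commit subject,
sheaves of kmalloc caches are the ones allocated with __GFP_NO_OBJ_EXT. A
guess at that allocation's shape, since the alloc_empty_sheaf() body is
elided in this diff ('sheaf_size' is a hypothetical placeholder):

	/* hypothetical: the flag that leaves the sheaf's alloc tag NULL */
	sheaf = kzalloc(sheaf_size, gfp | __GFP_NO_OBJ_EXT);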
@@ -2822,7 +2836,7 @@ static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp)
if (!sheaf)
return NULL;
- if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC)) {
+ if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
free_empty_sheaf(s, sheaf);
return NULL;
}
@@ -4575,7 +4589,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
return NULL;
if (empty) {
- if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC)) {
+ if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
full = empty;
} else {
/*
@@ -4890,9 +4904,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s,
struct slab_sheaf *sheaf, gfp_t gfp)
{
- int ret = 0;
+ gfp_t gfp_nomemalloc;
+ int ret;
+
+ gfp_nomemalloc = gfp | __GFP_NOMEMALLOC;
+ if (gfp_pfmemalloc_allowed(gfp))
+ gfp_nomemalloc |= __GFP_NOWARN;
- ret = refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC);
+ ret = refill_sheaf(s, sheaf, gfp_nomemalloc);
if (likely(!ret || !gfp_pfmemalloc_allowed(gfp)))
return ret;
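The rewritten helper suppresses the warning only when a retry is possible:
__GFP_NOWARN is added solely when gfp_pfmemalloc_allowed() says the caller
may fall back to memalloc reserves. The three outcomes visible in the hunk:

	/*
	 * refill succeeded             -> return 0
	 * failed, reserves not allowed -> return error; the warning fired,
	 *                                 since __GFP_NOWARN wasn't set
	 * failed, reserves allowed     -> fall through (elided here) to a
	 *                                 retry that may use reserves; the
	 *                                 first failure stayed silent
	 */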