author     Alexei Starovoitov <ast@kernel.org>  2026-03-01 20:03:45 +0300
committer  Alexei Starovoitov <ast@kernel.org>  2026-03-01 20:04:00 +0300
commit     309d8808eef93d29b65ae69241a4475b2c8bd6fe
tree       c42851bcd612348cfeee1857c0b92a5cfc53bf50 /mm
parent     f620af11c27b8ec9994a39fe968aa778112d1566
parent     eb71ab2bf72260054677e348498ba995a057c463
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf before 7.0-rc2
Cross-merge BPF and other fixes after downstream PR.

No conflicts.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/damon/core.c   |  3
-rw-r--r--  mm/huge_memory.c  |  3
-rw-r--r--  mm/kfence/core.c  | 29
-rw-r--r--  mm/memfd_luo.c    |  7
-rw-r--r--  mm/mm_init.c      |  6
-rw-r--r--  mm/page_alloc.c   |  3
-rw-r--r--  mm/slab.h         |  4
-rw-r--r--  mm/slub.c         | 51
8 files changed, 79 insertions(+), 27 deletions(-)
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 01eba1a547d4..adfc52fee9dc 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1252,6 +1252,9 @@ int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
 {
         int err;
 
+        if (!is_power_of_2(src->min_region_sz))
+                return -EINVAL;
+
         err = damon_commit_schemes(dst, src);
         if (err)
                 return err;
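
The new check in damon_commit_ctx() rejects a min_region_sz that is not a power of two before anything is committed. The kernel's is_power_of_2() is the classic single-set-bit test; a minimal standalone sketch of the same check (hypothetical userspace code, not the kernel header):

#include <stdbool.h>
#include <stdio.h>

/* A power of two has exactly one bit set, so n & (n - 1) clears it to 0. */
static bool is_power_of_2(unsigned long n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
        /* prints "1 0 0": 4096 passes, 0 and 24576 are rejected */
        printf("%d %d %d\n", is_power_of_2(4096), is_power_of_2(0),
               is_power_of_2(24576));
        return 0;
}
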
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d4ca8cfd7f9d..8e2746ea74ad 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -94,6 +94,9 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
 
         inode = file_inode(vma->vm_file);
 
+        if (IS_ANON_FILE(inode))
+                return false;
+
         return !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
 }
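
The huge_memory change makes file_thp_enabled() refuse anonymous inodes before the existing regular-file test runs. A standalone sketch of the resulting predicate order, with a stub inode whose fields stand in for IS_ANON_FILE(), inode_is_open_for_write() and S_ISREG() (all hypothetical here):

#include <stdbool.h>

/* Stub inode: the three flags stand in for the kernel predicates. */
struct inode { bool anon; bool open_for_write; bool regular; };

static bool file_thp_allowed(const struct inode *inode)
{
        /* New rule: anonymous inodes never get file-backed THP. */
        if (inode->anon)
                return false;
        /* Original rule: a regular file nobody holds open for write. */
        return !inode->open_for_write && inode->regular;
}

int main(void)
{
        struct inode reg = { .regular = true };
        struct inode anon = { .anon = true, .regular = true };

        /* exits 0 when the ordering behaves as described */
        return !(file_thp_allowed(&reg) && !file_thp_allowed(&anon));
}
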
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index b4ea3262c925..7393957f9a20 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -13,6 +13,7 @@
 #include <linux/hash.h>
 #include <linux/irq_work.h>
 #include <linux/jhash.h>
+#include <linux/kasan-enabled.h>
 #include <linux/kcsan-checks.h>
 #include <linux/kfence.h>
 #include <linux/kmemleak.h>
@@ -917,6 +918,20 @@ void __init kfence_alloc_pool_and_metadata(void)
                 return;
 
         /*
+         * If KASAN hardware tags are enabled, disable KFENCE, because it
+         * does not support MTE yet.
+         */
+        if (kasan_hw_tags_enabled()) {
+                pr_info("disabled as KASAN HW tags are enabled\n");
+                if (__kfence_pool) {
+                        memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
+                        __kfence_pool = NULL;
+                }
+                kfence_sample_interval = 0;
+                return;
+        }
+
+        /*
          * If the pool has already been initialized by arch, there is no need to
          * re-allocate the memory pool.
          */
@@ -989,14 +1004,14 @@ static int kfence_init_late(void)
 #ifdef CONFIG_CONTIG_ALLOC
         struct page *pages;
 
-        pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,
-                                   NULL);
+        pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL | __GFP_SKIP_KASAN,
+                                   first_online_node, NULL);
         if (!pages)
                 return -ENOMEM;
 
         __kfence_pool = page_to_virt(pages);
-        pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
-                                   NULL);
+        pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL | __GFP_SKIP_KASAN,
+                                   first_online_node, NULL);
         if (pages)
                 kfence_metadata_init = page_to_virt(pages);
 #else
@@ -1006,11 +1021,13 @@ static int kfence_init_late(void)
                 return -EINVAL;
         }
 
-        __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
+        __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE,
+                                          GFP_KERNEL | __GFP_SKIP_KASAN);
         if (!__kfence_pool)
                 return -ENOMEM;
 
-        kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
+        kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE,
+                                                 GFP_KERNEL | __GFP_SKIP_KASAN);
 #endif
 
         if (!kfence_metadata_init)
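
Two things happen in the kfence diff: at boot the feature now backs off completely when KASAN hardware tags (MTE) are active, returning its early memblock reservation and zeroing the sample interval, and the late allocations pass __GFP_SKIP_KASAN so the pool is left untagged. A standalone sketch of the back-off pattern, with stubs in place of the kernel helpers (hypothetical):

#include <stdio.h>
#include <stdlib.h>

static char *pool;                          /* stands in for __kfence_pool */
static unsigned long sample_interval = 100; /* 0 means permanently disabled */

static int hw_tags_enabled(void) { return 1; } /* kasan_hw_tags_enabled() stub */

static void alloc_pool_and_metadata(void)
{
        if (hw_tags_enabled()) {
                puts("disabled as KASAN HW tags are enabled");
                if (pool) {
                        free(pool);          /* memblock_free() in the kernel */
                        pool = NULL;
                }
                sample_interval = 0;         /* keep the feature off for good */
                return;
        }
        /* ... normal pool setup would follow here ... */
}

int main(void)
{
        alloc_pool_and_metadata();
        return 0;
}
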
diff --git a/mm/memfd_luo.c b/mm/memfd_luo.c
index 5c17da3880c5..e485b828d173 100644
--- a/mm/memfd_luo.c
+++ b/mm/memfd_luo.c
@@ -326,7 +326,12 @@ static void memfd_luo_finish(struct liveupdate_file_op_args *args)
         struct memfd_luo_folio_ser *folios_ser;
         struct memfd_luo_ser *ser;
 
-        if (args->retrieved)
+        /*
+         * If retrieve succeeded there is nothing to do, and if it failed,
+         * retrieve() already cleaned up everything it could. Cleanup is only
+         * needed when retrieve was never called at all.
+         */
+        if (args->retrieve_status)
                 return;
 
         ser = phys_to_virt(args->serialized_data);
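
The memfd_luo change widens the early return from a success flag to a status that is non-zero whenever retrieve() ran at all, successfully or not. A tiny sketch of the tri-state this encodes (the enum and its values are hypothetical, chosen only so that "never called" is the zero value):

/* status == 0: retrieve() never ran, so finish() must clean up.
 * status != 0: retrieve() ran (ok or failed) and already cleaned up. */
enum retrieve_status { RETRIEVE_NOT_CALLED = 0, RETRIEVE_OK, RETRIEVE_FAILED };

int finish_needs_cleanup(enum retrieve_status status)
{
        return status == RETRIEVE_NOT_CALLED;
}
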
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 61d983d23f55..df34797691bd 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1896,7 +1896,11 @@ static void __init free_area_init(void)
         for_each_node(nid) {
                 pg_data_t *pgdat;
 
-                if (!node_online(nid))
+                /*
+                 * If an architecture has not allocated node data for
+                 * this node, presume the node is memoryless or offline.
+                 */
+                if (!NODE_DATA(nid))
                         alloc_offline_node_data(nid);
 
                 pgdat = NODE_DATA(nid);
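
The mm_init change keys the fallback off NODE_DATA(nid) itself: whenever the architecture allocated no node data, offline node data is allocated, regardless of what the online mask says. A standalone sketch of that selection loop (the array and callback are hypothetical stand-ins for NODE_DATA() and alloc_offline_node_data()):

#define MAX_NUMNODES 8

static void *node_data[MAX_NUMNODES];  /* NODE_DATA(nid) stand-in */

void init_all_nodes(void (*alloc_offline)(int nid))
{
        for (int nid = 0; nid < MAX_NUMNODES; nid++) {
                if (!node_data[nid])    /* arch left this node unset */
                        alloc_offline(nid);
                /* the callback fills node_data[nid], so it is valid here */
        }
}
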
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fcc32737f451..2d4b6f1a554e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6928,7 +6928,8 @@ static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
 {
         const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
         const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
-                                  __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO;
+                                  __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO |
+                                  __GFP_SKIP_KASAN;
         const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
 
         /*
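
With kfence now passing __GFP_SKIP_KASAN to alloc_contig_pages(), the flag has to be whitelisted by the verifier above. The pattern is a plain mask test: any bit outside the allowed sets fails the call. A reduced sketch (names and the errno handling are hypothetical):

typedef unsigned int gfp_t;

#define MY_EINVAL 22

int verify_gfp_mask(gfp_t gfp_mask, gfp_t reclaim_mask, gfp_t action_mask)
{
        /* Reject any flag bit that is in neither whitelist. */
        if (gfp_mask & ~(reclaim_mask | action_mask))
                return -MY_EINVAL;
        return 0;
}
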
diff --git a/mm/slab.h b/mm/slab.h
index 71c7261bf822..f6ef862b60ef 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -290,14 +290,14 @@ static inline void *nearest_obj(struct kmem_cache *cache,
 
 /* Determine object index from a given position */
 static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
-                                          void *addr, void *obj)
+                                          void *addr, const void *obj)
 {
         return reciprocal_divide(kasan_reset_tag(obj) - addr,
                                  cache->reciprocal_size);
 }
 
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-                                        const struct slab *slab, void *obj)
+                                        const struct slab *slab, const void *obj)
 {
         if (is_kfence_address(obj))
                 return 0;
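
The slab.h hunk only constifies the object pointer, but the computation it feeds deserves a note: obj_to_index() turns an object address into a slot number without a hardware divide, using a reciprocal precomputed per cache. A standalone sketch of the idea (this mirrors the concept behind reciprocal_divide(), not the kernel's exact fixed-point encoding):

#include <assert.h>
#include <stdint.h>

/* Precompute ceil(2^32 / size) once per cache... */
struct recip { uint64_t m; };

static struct recip recip_init(uint32_t size)
{
        return (struct recip){ .m = ((1ULL << 32) + size - 1) / size };
}

/* ...then every offset-to-index lookup is one multiply and one shift. */
static uint32_t recip_div(uint32_t offset, struct recip r)
{
        return (uint32_t)((offset * r.m) >> 32);
}

int main(void)
{
        struct recip r = recip_init(192); /* e.g. a cache of 192-byte objects */

        for (uint32_t i = 0; i < 1000; i++)
                assert(recip_div(i * 192, r) == i);
        return 0;
}
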
diff --git a/mm/slub.c b/mm/slub.c
index 862642c165ed..0c906fefc31b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2041,18 +2041,18 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
 
-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
+static inline void mark_obj_codetag_empty(const void *obj)
 {
-        struct slab *obj_exts_slab;
+        struct slab *obj_slab;
         unsigned long slab_exts;
 
-        obj_exts_slab = virt_to_slab(obj_exts);
-        slab_exts = slab_obj_exts(obj_exts_slab);
+        obj_slab = virt_to_slab(obj);
+        slab_exts = slab_obj_exts(obj_slab);
         if (slab_exts) {
                 get_slab_obj_exts(slab_exts);
-                unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
-                                                 obj_exts_slab, obj_exts);
-                struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
+                unsigned int offs = obj_to_index(obj_slab->slab_cache,
+                                                 obj_slab, obj);
+                struct slabobj_ext *ext = slab_obj_ext(obj_slab,
                                                        slab_exts, offs);
 
                 if (unlikely(is_codetag_empty(&ext->ref))) {
@@ -2090,7 +2090,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
 
 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
 
-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
+static inline void mark_obj_codetag_empty(const void *obj) {}
 static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
                         struct slabobj_ext *vec, unsigned int objects) {}
@@ -2196,7 +2196,6 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 retry:
         old_exts = READ_ONCE(slab->obj_exts);
         handle_failed_objexts_alloc(old_exts, vec, objects);
-        slab_set_stride(slab, sizeof(struct slabobj_ext));
 
         if (new_slab) {
                 /*
@@ -2211,7 +2210,7 @@ retry:
                  * assign slabobj_exts in parallel. In this case the existing
                  * objcg vector should be reused.
                  */
-                mark_objexts_empty(vec);
+                mark_obj_codetag_empty(vec);
                 if (unlikely(!allow_spin))
                         kfree_nolock(vec);
                 else
@@ -2254,7 +2253,7 @@ static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
          * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
          * the extension for obj_exts is expected to be NULL.
          */
-        mark_objexts_empty(obj_exts);
+        mark_obj_codetag_empty(obj_exts);
         if (allow_spin)
                 kfree(obj_exts);
         else
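
Both renamed call sites rely on the same sentinel trick referenced in the comment: a NULL tag reference would trip the allocation-profiling checks on free, so a deliberately-empty marker is stored instead. A reduced sketch of the pattern (the struct layout and the (void *)1 value are illustrative, not the kernel definitions):

#define CODETAG_EMPTY ((void *)1)   /* non-NULL, never a real pointer */

struct codetag_ref { void *ct; };

void mark_empty(struct codetag_ref *ref)
{
        if (!ref->ct)
                ref->ct = CODETAG_EMPTY; /* "intentionally untagged" */
}

int ref_is_empty(const struct codetag_ref *ref)
{
        return ref->ct == CODETAG_EMPTY;
}
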
@@ -2272,6 +2271,9 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
         void *addr;
         unsigned long obj_exts;
 
+        /* Initialize stride early to avoid memory ordering issues */
+        slab_set_stride(slab, sizeof(struct slabobj_ext));
+
         if (!need_slab_obj_exts(s))
                 return;
 
@@ -2288,7 +2290,6 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
                 obj_exts |= MEMCG_DATA_OBJEXTS;
 #endif
                 slab->obj_exts = obj_exts;
-                slab_set_stride(slab, sizeof(struct slabobj_ext));
         } else if (s->flags & SLAB_OBJ_EXT_IN_OBJ) {
                 unsigned int offset = obj_exts_offset_in_object(s);
 
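
The two stride hunks move slab_set_stride() in front of the store that publishes slab->obj_exts, so a reader that observes the published value also observes the stride, matching the "initialize stride early" comment above. A standalone release/acquire sketch of that publish-after-init ordering (C11 atomics; the kernel uses its own primitives, and the type here is hypothetical):

#include <stdatomic.h>
#include <stddef.h>

struct slab_like {
        size_t stride;                   /* written before publication */
        _Atomic unsigned long obj_exts;  /* the publication point */
};

void publish(struct slab_like *s, unsigned long exts, size_t stride)
{
        s->stride = stride;              /* init first... */
        atomic_store_explicit(&s->obj_exts, exts, memory_order_release);
}

size_t stride_of(struct slab_like *s)
{
        if (atomic_load_explicit(&s->obj_exts, memory_order_acquire))
                return s->stride;        /* ...so this is guaranteed visible */
        return 0;                        /* not published yet */
}
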
@@ -2312,6 +2313,10 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
 
 #else /* CONFIG_SLAB_OBJ_EXT */
 
+static inline void mark_obj_codetag_empty(const void *obj)
+{
+}
+
 static inline void init_slab_obj_exts(struct slab *slab)
 {
 }
@@ -2783,6 +2788,15 @@ static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
 
 static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
 {
+        /*
+         * If the sheaf was created with the __GFP_NO_OBJ_EXT flag, its
+         * corresponding extension is NULL and alloc_tag_sub() will throw a
+         * warning; therefore replace NULL with CODETAG_EMPTY to indicate
+         * that the extension for this sheaf is expected to be NULL.
+         */
+        if (s->flags & SLAB_KMALLOC)
+                mark_obj_codetag_empty(sheaf);
+
         kfree(sheaf);
         stat(s, SHEAF_FREE);
@@ -2822,7 +2836,7 @@ static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp)
         if (!sheaf)
                 return NULL;
 
-        if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC)) {
+        if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
                 free_empty_sheaf(s, sheaf);
                 return NULL;
         }
@@ -4575,7 +4589,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
                         return NULL;
 
                 if (empty) {
-                        if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC)) {
+                        if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
                                 full = empty;
                         } else {
                                 /*
@@ -4890,9 +4904,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
 
 static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s,
                                       struct slab_sheaf *sheaf, gfp_t gfp)
 {
-        int ret = 0;
+        gfp_t gfp_nomemalloc;
+        int ret;
+
+        gfp_nomemalloc = gfp | __GFP_NOMEMALLOC;
+        if (gfp_pfmemalloc_allowed(gfp))
+                gfp_nomemalloc |= __GFP_NOWARN;
 
-        ret = refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC);
+        ret = refill_sheaf(s, sheaf, gfp_nomemalloc);
         if (likely(!ret || !gfp_pfmemalloc_allowed(gfp)))
                 return ret;
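
The final hunk builds the first-attempt mask up front: the no-reserves try adds __GFP_NOWARN only when a reserve-backed retry may follow, so a failure that will be retried stays silent (the retry itself is in the elided remainder of the function). A standalone sketch of that shape, with stub helpers and made-up flag values (hypothetical):

#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_NOMEMALLOC 0x1u
#define __GFP_NOWARN     0x2u

/* Stubs: refill() returns 0 on success; pfmemalloc_allowed() says whether
 * this caller may dip into the emergency reserves on a second attempt. */
static int refill(gfp_t gfp) { (void)gfp; return -1; }
static int pfmemalloc_allowed(gfp_t gfp) { (void)gfp; return 1; }

static int prefill_pfmemalloc(gfp_t gfp)
{
        gfp_t first_try = gfp | __GFP_NOMEMALLOC;
        int ret;

        /* Stay quiet on the first failure only if a retry will follow. */
        if (pfmemalloc_allowed(gfp))
                first_try |= __GFP_NOWARN;

        ret = refill(first_try);
        if (!ret || !pfmemalloc_allowed(gfp))
                return ret;

        /* The second attempt may use the reserves (and may warn). */
        return refill(gfp);
}

int main(void)
{
        printf("%d\n", prefill_pfmemalloc(0)); /* -1: both attempts failed */
        return 0;
}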