author     Joel Stanley <joel@jms.id.au>  2020-07-22 12:42:41 +0300
committer  Joel Stanley <joel@jms.id.au>  2020-07-22 12:42:46 +0300
commit     8a9b346382056b52cd7ff141ae9f15a0fcfeb13d (patch)
tree       7b855ed138c412bc27713ea8d3feef8939c954a0 /mm
parent     2b4829edfc1c225c717652153097470529d171db (diff)
parent     d811d29517d1ea05bc159579231652d3ca1c2a01 (diff)
Merge tag 'v5.4.53' into dev-5.4
This is the 5.4.53 stable release

Signed-off-by: Joel Stanley <joel@jms.id.au>
Diffstat (limited to 'mm')
-rw-r--r--  mm/compaction.c    19
-rw-r--r--  mm/memcontrol.c     4
-rw-r--r--  mm/slab_common.c    2
-rw-r--r--  mm/slub.c          30
-rw-r--r--  mm/swap_state.c     4
5 files changed, 51 insertions(+), 8 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 672d3c78c6ab..92470625f0b1 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2310,16 +2310,26 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
.page = NULL,
};
- if (capture)
- current->capture_control = &capc;
+ /*
+ * Make sure the structs are really initialized before we expose the
+ * capture control, in case we are interrupted and the interrupt handler
+ * frees a page.
+ */
+ barrier();
+ WRITE_ONCE(current->capture_control, &capc);
ret = compact_zone(&cc, &capc);
VM_BUG_ON(!list_empty(&cc.freepages));
VM_BUG_ON(!list_empty(&cc.migratepages));
- *capture = capc.page;
- current->capture_control = NULL;
+ /*
+ * Make sure we hide capture control first before we read the captured
+ * page pointer, otherwise an interrupt could free and capture a page
+ * and we would leak it.
+ */
+ WRITE_ONCE(current->capture_control, NULL);
+ *capture = READ_ONCE(capc.page);
return ret;
}
@@ -2333,6 +2343,7 @@ int sysctl_extfrag_threshold = 500;
* @alloc_flags: The allocation flags of the current allocation
* @ac: The context of current allocation
* @prio: Determines how hard direct compaction should try to succeed
+ * @capture: Pointer to free page created by compaction will be stored here
*
* This is the main entry point for direct page compaction.
*/
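
Note on the compaction.c change above: the fix is purely about ordering against interrupts. The capture_control struct must be fully initialized before the pointer to it becomes visible to an interrupt handler that frees (and may capture) a page, and the pointer must be hidden again before capc.page is read back, otherwise a page captured after the read would be silently leaked. Below is a rough userspace sketch of the same publish/hide ordering, using a signal handler in place of an interrupt and compiler builtins in place of the kernel's barrier()/WRITE_ONCE()/READ_ONCE(); the names and values are illustrative, not kernel code.

#include <signal.h>
#include <stdio.h>

struct capture { void *page; };

/* Pointer the "interrupt handler" looks at, analogous to
 * current->capture_control. */
static struct capture *volatile published;

static void fake_interrupt(int sig)
{
    struct capture *c = published;

    (void)sig;
    if (c)                        /* hand the freed page to the waiting task */
        c->page = (void *)0x1000; /* stand-in for a real struct page */
}

int main(void)
{
    struct capture capc = { .page = NULL };

    signal(SIGALRM, fake_interrupt);

    /* 1. capc is fully initialized above; only now expose it.  The kernel
     *    patch gets the same effect with barrier() + WRITE_ONCE(). */
    __atomic_signal_fence(__ATOMIC_SEQ_CST);
    published = &capc;

    raise(SIGALRM);               /* an "interrupt" arrives mid-compaction */

    /* 2. hide the capture control first, 3. then read the result, so a
     *    page captured after the read cannot be lost. */
    published = NULL;
    printf("captured page: %p\n", capc.page);
    return 0;
}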
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0d6f3ea86738..a3f4c35bb5fa 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2895,8 +2895,10 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
return;
cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
- if (!cw)
+ if (!cw) {
+ css_put(&memcg->css);
return;
+ }
cw->memcg = memcg;
cw->cachep = cachep;
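
Note on the memcontrol.c change: the caller takes a css reference before memcg_schedule_kmem_cache_create() runs, and that reference is normally dropped by the scheduled work. When the kmalloc() of the work item fails, no work ever runs, so the early return used to leak the reference; the added css_put() restores the balance. The following is a generic sketch of the rule that a function which takes ownership of a reference must release it on every exit path, including failure; the obj_get()/obj_put() helpers are illustrative, not the memcg API.

#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static void obj_get(struct obj *o) { o->refs++; }
static void obj_put(struct obj *o) { if (--o->refs == 0) printf("object freed\n"); }

/*
 * Takes ownership of one reference on @o.  Every exit path must drop it:
 * normally the queued work does, but on allocation failure nothing will,
 * so the failure path has to call obj_put() itself (the bug fixed above).
 */
static int schedule_create(struct obj *o)
{
    void *work = malloc(64);

    if (!work) {
        obj_put(o);               /* the equivalent of the added css_put() */
        return -1;
    }
    /* ... queue the work; its handler drops the reference when done ... */
    free(work);                   /* pretend the work already ran */
    obj_put(o);
    return 0;
}

int main(void)
{
    struct obj o = { .refs = 1 };

    obj_get(&o);                  /* reference handed to schedule_create() */
    schedule_create(&o);
    obj_put(&o);                  /* drop our own reference */
    return 0;
}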
diff --git a/mm/slab_common.c b/mm/slab_common.c
index ade6c257d4b4..8c1ffbf7de45 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1740,7 +1740,7 @@ void kzfree(const void *p)
if (unlikely(ZERO_OR_NULL_PTR(mem)))
return;
ks = ksize(mem);
- memset(mem, 0, ks);
+ memzero_explicit(mem, ks);
kfree(mem);
}
EXPORT_SYMBOL(kzfree);
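
Note on the slab_common.c change: kzfree() exists to scrub sensitive data (e.g. key material) before freeing, but a plain memset() of memory that is immediately freed is a dead store the compiler is allowed to delete. memzero_explicit() follows the memset with a barrier so the zeroing survives optimization. A userspace approximation of the same idea follows; it is a sketch of the technique, not the kernel's implementation.

#include <stdlib.h>
#include <string.h>

/*
 * Zero a buffer in a way the optimizer cannot drop: the empty asm with a
 * "memory" clobber makes the compiler assume the zeroed bytes are observed,
 * so the memset is not treated as a dead store before free().
 */
static void zero_explicit(void *p, size_t len)
{
    memset(p, 0, len);
    __asm__ __volatile__("" : : "r"(p) : "memory");
}

void drop_secret(void)
{
    char *key = malloc(32);

    if (!key)
        return;
    /* ... use the key ... */
    zero_explicit(key, 32);   /* a bare memset(key, 0, 32) here may be elided */
    free(key);
}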
diff --git a/mm/slub.c b/mm/slub.c
index fca33abd6c42..709e31002504 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -644,6 +644,20 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
va_end(args);
}
+static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
+ void *freelist, void *nextfree)
+{
+ if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
+ !check_valid_pointer(s, page, nextfree)) {
+ object_err(s, page, freelist, "Freechain corrupt");
+ freelist = NULL;
+ slab_fix(s, "Isolate corrupted freechain");
+ return true;
+ }
+
+ return false;
+}
+
static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
unsigned int off; /* Offset of last byte */
@@ -1379,6 +1393,11 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
static inline void dec_slabs_node(struct kmem_cache *s, int node,
int objects) {}
+static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
+ void *freelist, void *nextfree)
+{
+ return false;
+}
#endif /* CONFIG_SLUB_DEBUG */
/*
@@ -2062,6 +2081,14 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
void *prior;
unsigned long counters;
+ /*
+ * If 'nextfree' is invalid, it is possible that the object at
+ * 'freelist' is already corrupted. So isolate all objects
+ * starting at 'freelist'.
+ */
+ if (freelist_corrupted(s, page, freelist, nextfree))
+ break;
+
do {
prior = page->freelist;
counters = page->counters;
@@ -5621,7 +5648,8 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
*/
if (buffer)
buf = buffer;
- else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
+ else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf) &&
+ !IS_ENABLED(CONFIG_SLUB_STATS))
buf = mbuf;
else {
buffer = (char *) get_zeroed_page(GFP_KERNEL);
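
Note on the slub.c changes: deactivate_slab() walks the per-CPU freelist, and if that chain has been corrupted (for example by a use-after-free), blindly following the next pointer can crash or spread the damage. With SLAB_CONSISTENCY_CHECKS enabled, freelist_corrupted() validates the next pointer against the slab page first and, on failure, reports the corruption and isolates the rest of the chain instead of dereferencing it. (The second hunk avoids the small on-stack mbuf when CONFIG_SLUB_STATS is enabled, since stat attributes can emit more than it holds.) Below is a simplified, hypothetical sketch of validating a free chain against its backing region before walking it; the struct and bounds check are illustrative, not the SLUB internals.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct region {
    char  *base;      /* start of the slab-like region */
    size_t size;      /* bytes it covers */
    size_t obj_size;  /* size of each object */
};

/* NULL ends the chain; anything else must land on an object boundary
 * inside the region, roughly what check_valid_pointer() enforces. */
static bool valid_pointer(const struct region *r, const void *p)
{
    const char *c = p;

    if (!p)
        return true;
    if (c < r->base || c >= r->base + r->size)
        return false;
    return (size_t)(c - r->base) % r->obj_size == 0;
}

/* Walk a free chain whose next pointer is stored at the start of each
 * object; stop and report as soon as the chain turns out to be corrupt,
 * rather than dereferencing a bogus pointer. */
static size_t count_free(const struct region *r, void *freelist)
{
    size_t n = 0;

    while (freelist) {
        void *next = *(void **)freelist;

        if (!valid_pointer(r, next)) {
            fprintf(stderr, "freechain corrupt at %p, isolating the rest\n",
                    freelist);
            break;
        }
        n++;
        freelist = next;
    }
    return n;
}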
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 8e7ce9a9bc5e..4ce014dc4571 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -23,6 +23,7 @@
#include <linux/huge_mm.h>
#include <asm/pgtable.h>
+#include "internal.h"
/*
* swapper_space is a fiction, retained to simplify the path through
@@ -418,7 +419,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
/* May fail (-ENOMEM) if XArray node allocation failed. */
__SetPageLocked(new_page);
__SetPageSwapBacked(new_page);
- err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
+ err = add_to_swap_cache(new_page, entry,
+ gfp_mask & GFP_RECLAIM_MASK);
if (likely(!err)) {
/* Initiate read into locked page */
SetPageWorkingset(new_page);
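
Note on the swap_state.c change: the XArray node allocation inside add_to_swap_cache() should inherit only the reclaim-related bits of the caller's gfp_mask. Masking with GFP_KERNEL keeps just __GFP_RECLAIM|__GFP_IO|__GFP_FS and silently drops modifiers such as __GFP_NOWARN and __GFP_NORETRY that the caller set, whereas GFP_RECLAIM_MASK (defined in mm/internal.h, hence the added include) preserves them. The small standalone program below illustrates the difference between the two masks, with made-up bit values standing in for the real gfp.h definitions.

#include <stdio.h>

/* Illustrative bit values only; the real flags live in include/linux/gfp.h. */
#define F_RECLAIM  (1u << 0)
#define F_IO       (1u << 1)
#define F_FS       (1u << 2)
#define F_NOWARN   (1u << 3)
#define F_NORETRY  (1u << 4)

#define KERNEL_MASK   (F_RECLAIM | F_IO | F_FS)            /* ~ GFP_KERNEL */
#define RECLAIM_MASK  (KERNEL_MASK | F_NOWARN | F_NORETRY) /* ~ GFP_RECLAIM_MASK (subset) */

int main(void)
{
    /* A caller that asked for quiet, non-retrying allocations. */
    unsigned int caller_gfp = F_RECLAIM | F_IO | F_FS | F_NOWARN | F_NORETRY;

    printf("& KERNEL_MASK : %#x  (NOWARN/NORETRY silently dropped)\n",
           caller_gfp & KERNEL_MASK);
    printf("& RECLAIM_MASK: %#x  (caller's modifiers preserved)\n",
           caller_gfp & RECLAIM_MASK);
    return 0;
}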