summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--include/linux/mm_inline.h11
-rw-r--r--mm/rmap.c7
-rw-r--r--mm/vmscan.c33
3 files changed, 41 insertions, 10 deletions
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index fa2d6ba811b5..2aedcff6a2c1 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -102,6 +102,12 @@ static __always_inline enum lru_list folio_lru_list(const struct folio *folio)
#ifdef CONFIG_LRU_GEN
+static inline bool lru_gen_switching(void)
+{
+ DECLARE_STATIC_KEY_FALSE(lru_switch);
+
+ return static_branch_unlikely(&lru_switch);
+}
#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
@@ -316,6 +322,11 @@ static inline bool lru_gen_enabled(void)
return false;
}
+static inline bool lru_gen_switching(void)
+{
+ return false;
+}
+
static inline bool lru_gen_in_fault(void)
{
return false;
diff --git a/mm/rmap.c b/mm/rmap.c
index abe4712a220c..78b7fb5f367c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -973,7 +973,12 @@ static bool folio_referenced_one(struct folio *folio,
nr = folio_pte_batch(folio, pvmw.pte, pteval, max_nr);
}
- if (lru_gen_enabled() && pvmw.pte) {
+ /*
+ * When LRU is switching, we don't know where the surrounding folios
+ * are — they could be on active/inactive lists or on MGLRU. So the
+ * simplest approach is to disable this look-around optimization.
+ */
+ if (lru_gen_enabled() && !lru_gen_switching() && pvmw.pte) {
if (lru_gen_look_around(&pvmw, nr))
referenced++;
} else if (pvmw.pte) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 641a6063f375..42f834c508bc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -905,7 +905,7 @@ static enum folio_references folio_check_references(struct folio *folio,
if (referenced_ptes == -1)
return FOLIOREF_KEEP;
- if (lru_gen_enabled()) {
+ if (lru_gen_enabled() && !lru_gen_switching()) {
if (!referenced_ptes)
return FOLIOREF_RECLAIM;
@@ -2308,7 +2308,7 @@ static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc)
unsigned long file;
struct lruvec *target_lruvec;
- if (lru_gen_enabled())
+ if (lru_gen_enabled() && !lru_gen_switching())
return;
target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
@@ -2647,6 +2647,7 @@ static bool can_age_anon_pages(struct lruvec *lruvec,
#ifdef CONFIG_LRU_GEN
+DEFINE_STATIC_KEY_FALSE(lru_switch);
#ifdef CONFIG_LRU_GEN_ENABLED
DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS);
#define get_cap(cap) static_branch_likely(&lru_gen_caps[cap])
@@ -5181,6 +5182,8 @@ static void lru_gen_change_state(bool enabled)
if (enabled == lru_gen_enabled())
goto unlock;
+ static_branch_enable_cpuslocked(&lru_switch);
+
if (enabled)
static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
else
@@ -5211,6 +5214,9 @@ static void lru_gen_change_state(bool enabled)
cond_resched();
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
+
+ static_branch_disable_cpuslocked(&lru_switch);
+
unlock:
mutex_unlock(&state_mutex);
put_online_mems();
@@ -5783,9 +5789,12 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
bool proportional_reclaim;
struct blk_plug plug;
- if (lru_gen_enabled() && !root_reclaim(sc)) {
+ if ((lru_gen_enabled() || lru_gen_switching()) && !root_reclaim(sc)) {
lru_gen_shrink_lruvec(lruvec, sc);
- return;
+
+ if (!lru_gen_switching())
+ return;
+
}
get_scan_count(lruvec, sc, nr);
@@ -6045,10 +6054,13 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
struct lruvec *target_lruvec;
bool reclaimable = false;
- if (lru_gen_enabled() && root_reclaim(sc)) {
+ if ((lru_gen_enabled() || lru_gen_switching()) && root_reclaim(sc)) {
memset(&sc->nr, 0, sizeof(sc->nr));
lru_gen_shrink_node(pgdat, sc);
- return;
+
+ if (!lru_gen_switching())
+ return;
+
}
target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
@@ -6318,7 +6330,7 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
struct lruvec *target_lruvec;
unsigned long refaults;
- if (lru_gen_enabled())
+ if (lru_gen_enabled() && !lru_gen_switching())
return;
target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
@@ -6708,9 +6720,12 @@ static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc)
struct mem_cgroup *memcg;
struct lruvec *lruvec;
- if (lru_gen_enabled()) {
+ if (lru_gen_enabled() || lru_gen_switching()) {
lru_gen_age_node(pgdat, sc);
- return;
+
+ if (!lru_gen_switching())
+ return;
+
}
lruvec = mem_cgroup_lruvec(NULL, pgdat);