author     Matthew Wilcox (Oracle) <willy@infradead.org>  2023-01-11 17:29:10 +0300
committer  Andrew Morton <akpm@linux-foundation.org>      2023-02-03 09:33:00 +0300
commit     4375a553f46c6cb66d1711d8f514dfdf34ce74b0
tree       564bd1c2948ac25cd7ef406f3073ddc53d67430e
parent     a8d55327ccc1f999a5fba4eee67ed08bd36493ad
mm: move page->deferred_list to folio->_deferred_list
Remove the entire block of definitions for the second tail page, and add
the deferred list to the struct folio.  This actually moves _deferred_list
to a different offset in struct folio because I don't see a need to
include the padding.

This lets us use list_for_each_entry_safe() in deferred_split_scan()
and avoid a number of calls to compound_head().

Link: https://lkml.kernel.org/r/20230111142915.1001531-25-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--  include/linux/huge_mm.h   |  9
-rw-r--r--  include/linux/mm_types.h  | 14
-rw-r--r--  mm/huge_memory.c          | 32
3 files changed, 27 insertions(+), 28 deletions(-)
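The core of the change: once the deferred list is a typed field of struct
folio, the shrinker can walk the queue with list_for_each_entry_safe()
instead of open-coding list_entry() plus compound_head() on every node.
Below is a minimal, self-contained userspace sketch of that iteration
pattern; every type and macro here is a local stand-in for its
<linux/list.h> counterpart, not the kernel code itself.

/*
 * Standalone model of a typed, deletion-safe list walk.  The "safe"
 * variant caches the next entry before running the loop body, so the
 * body may unlink the current entry without derailing the traversal.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* Map a member pointer back to its containing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_entry(ptr, type, member)	container_of(ptr, type, member)

#define list_for_each_entry_safe(pos, n, head, member)			  \
	for (pos = list_entry((head)->next, __typeof__(*pos), member),	  \
	     n = list_entry(pos->member.next, __typeof__(*pos), member); \
	     &pos->member != (head);					  \
	     pos = n, n = list_entry(n->member.next, __typeof__(*n), member))

/* Toy stand-in for struct folio: the list_head is just another field. */
struct folio {
	int order;
	struct list_head _deferred_list;
};

int main(void)
{
	struct list_head queue = LIST_HEAD_INIT(queue);
	struct folio folios[3] = { { 2 }, { 3 }, { 4 } };
	struct folio *folio, *next;

	for (int i = 0; i < 3; i++)
		list_add_tail(&folios[i]._deferred_list, &queue);

	/* Unlink entries while walking: legal only with the _safe variant. */
	list_for_each_entry_safe(folio, next, &queue, _deferred_list) {
		printf("order-%d folio\n", folio->order);
		list_del(&folio->_deferred_list);
	}
	return 0;
}

Because the iterator yields a struct folio * directly, there is no page
pointer in sight and therefore nothing to pass through compound_head(),
which is exactly the saving the commit message describes.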
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a1341fdcf666..aacfcb02606f 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -295,11 +295,10 @@ static inline bool thp_migration_supported(void)
 static inline struct list_head *page_deferred_list(struct page *page)
 {
-	/*
-	 * See organization of tail pages of compound page in
-	 * "struct page" definition.
-	 */
-	return &page[2].deferred_list;
+	struct folio *folio = (struct folio *)page;
+
+	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
+	return &folio->_deferred_list;
 }
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
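A note on the new VM_BUG_ON_FOLIO() above: _deferred_list occupies words
of the folio's third page, so only folios of order >= 2 (at least four
pages) have anywhere to store it.  A toy model of that invariant, with
purely illustrative names:

#include <assert.h>

struct toy_folio {
	unsigned int order;	/* folio spans 1UL << order pages */
};

/* Index of the constituent page that would hold _deferred_list. */
#define DEFERRED_LIST_PAGE	2

static int toy_folio_has_deferred_list(const struct toy_folio *folio)
{
	/* order 0 -> 1 page, order 1 -> 2 pages: no page index 2. */
	return (1UL << folio->order) > DEFERRED_LIST_PAGE;
}

int main(void)
{
	struct toy_folio small = { 1 }, large = { 2 };

	assert(!toy_folio_has_deferred_list(&small));
	assert(toy_folio_has_deferred_list(&large));
	return 0;
}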
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d458e9b8496c..7eb4d0815a78 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -141,12 +141,6 @@ struct page {
 		struct {	/* Tail pages of compound page */
 			unsigned long compound_head;	/* Bit zero is set */
 		};
-		struct {	/* Second tail page of transparent huge page */
-			unsigned long _compound_pad_1;	/* compound_head */
-			unsigned long _compound_pad_2;
-			/* For both global and memcg */
-			struct list_head deferred_list;
-		};
 		struct {	/* Second tail page of hugetlb page */
 			unsigned long _hugetlb_pad_1;	/* compound_head */
 			void *hugetlb_subpool;
@@ -302,6 +296,7 @@ static inline struct page *encoded_page_ptr(struct encoded_page *page)
  * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
  * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
  * @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
+ * @_deferred_list: Folios to be split under memory pressure.
  *
  * A folio is a physically, virtually and logically contiguous set
  * of bytes. It is a power-of-two in size, and it is aligned to that
@@ -368,6 +363,13 @@ struct folio {
 			void *_hugetlb_hwpoison;
 	/* private: the union with struct page is transitional */
 		};
+		struct {
+			unsigned long _flags_2a;
+			unsigned long _head_2a;
+	/* public: */
+			struct list_head _deferred_list;
+	/* private: the union with struct page is transitional */
+		};
 		struct page __page_2;
 	};
 };
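The _flags_2a/_head_2a members added above are never used by name: they
exist so _deferred_list lands past the two words every tail page already
dedicates to flags and compound_head.  A compile-time sketch of that
overlay discipline follows; the page size and type names are local to
the example (mm_types.h enforces the real layout with its
FOLIO_MATCH()-style static_asserts).

/*
 * Sketch of the overlay technique used by struct folio: typed fields
 * share storage with page-sized chunks, and a static_assert pins the
 * field to the exact words of the tail page it aliases.  Sizes here
 * are illustrative, not the kernel's.
 */
#include <assert.h>
#include <stddef.h>

struct mock_list_head { struct mock_list_head *next, *prev; };

struct mock_page { unsigned long words[8]; };	/* stand-in page frame */

struct mock_folio {
	struct mock_page __page_0;		/* head page */
	struct mock_page __page_1;		/* first tail page */
	union {					/* second tail page */
		struct {
			unsigned long _flags_2a;	/* aliases flags */
			unsigned long _head_2a;		/* aliases compound_head */
			struct mock_list_head _deferred_list;
		};
		struct mock_page __page_2;
	};
};

/* _deferred_list must start at the third word of the third page. */
static_assert(offsetof(struct mock_folio, _deferred_list) ==
	      2 * sizeof(struct mock_page) + 2 * sizeof(unsigned long),
	      "_deferred_list does not overlay the expected words");

int main(void) { return 0; }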
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bfa960f012fa..a4138daaa0b8 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2756,9 +2756,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	/* Prevent deferred_split_scan() touching ->_refcount */
 	spin_lock(&ds_queue->split_queue_lock);
 	if (folio_ref_freeze(folio, 1 + extra_pins)) {
-		if (!list_empty(page_deferred_list(&folio->page))) {
+		if (!list_empty(&folio->_deferred_list)) {
 			ds_queue->split_queue_len--;
-			list_del(page_deferred_list(&folio->page));
+			list_del(&folio->_deferred_list);
 		}
 		spin_unlock(&ds_queue->split_queue_lock);
 		if (mapping) {
@@ -2873,8 +2873,8 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
 	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
 	unsigned long flags;
-	LIST_HEAD(list), *pos, *next;
-	struct page *page;
+	LIST_HEAD(list);
+	struct folio *folio, *next;
 	int split = 0;
 
 #ifdef CONFIG_MEMCG
@@ -2884,14 +2884,13 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 	/* Take pin on all head pages to avoid freeing them under us */
-	list_for_each_safe(pos, next, &ds_queue->split_queue) {
-		page = list_entry((void *)pos, struct page, deferred_list);
-		page = compound_head(page);
-		if (get_page_unless_zero(page)) {
-			list_move(page_deferred_list(page), &list);
+	list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
+							_deferred_list) {
+		if (folio_try_get(folio)) {
+			list_move(&folio->_deferred_list, &list);
 		} else {
-			/* We lost race with put_compound_page() */
-			list_del_init(page_deferred_list(page));
+			/* We lost race with folio_put() */
+			list_del_init(&folio->_deferred_list);
 			ds_queue->split_queue_len--;
 		}
 		if (!--sc->nr_to_scan)
 			break;
@@ -2899,16 +2898,15 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 	}
 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 
-	list_for_each_safe(pos, next, &list) {
-		page = list_entry((void *)pos, struct page, deferred_list);
-		if (!trylock_page(page))
+	list_for_each_entry_safe(folio, next, &list, _deferred_list) {
+		if (!folio_trylock(folio))
 			goto next;
 		/* split_huge_page() removes page from list on success */
-		if (!split_huge_page(page))
+		if (!split_folio(folio))
 			split++;
-		unlock_page(page);
+		folio_unlock(folio);
 next:
-		put_page(page);
+		folio_put(folio);
 	}
 
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
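One subtlety preserved by the conversion: the scan only moves a folio to
its private list after folio_try_get() succeeds, because a racing
folio_put() may already be freeing it.  The primitive underneath is
"take a reference unless the count has already dropped to zero".  A
standalone C11 sketch of that idea follows; the kernel implements it
with its own refcount machinery, so this is a model of the technique,
not the kernel function.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/*
 * Take a reference unless the object is already on its way to being
 * freed (refcount observed at zero).  Returns true on success.
 */
static bool refcount_inc_unless_zero(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0) {
		/* CAS: on failure, 'old' is refreshed and we retry. */
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int ref = 1;

	assert(refcount_inc_unless_zero(&ref));		/* 1 -> 2, ok */
	atomic_store(&ref, 0);
	assert(!refcount_inc_unless_zero(&ref));	/* being freed: refuse */
	return 0;
}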