summaryrefslogtreecommitdiff
path: root/include/linux
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/page-flags.h81
1 file changed, 73 insertions, 8 deletions
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5c469d38dd69..43876b108f0a 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -198,6 +198,29 @@ enum pageflags {
#ifndef __GENERATING_BOUNDS_H
+/*
+ * For tail pages, if the size of struct page is a power of 2, ->compound_info
+ * encodes the mask that converts the address of the tail page to the
+ * address of the head page.
+ *
+ * Otherwise, ->compound_info holds a direct pointer to the head page.
+ */
+static __always_inline bool compound_info_has_mask(void)
+{
+ /*
+ * Limit mask usage to HugeTLB vmemmap optimization (HVO) where it
+ * makes a difference.
+ *
+	 * The mask-based approach would work in a wider set of conditions,
+ * but it requires validating that struct pages are naturally aligned
+ * for all orders up to the MAX_FOLIO_ORDER, which can be tricky.
+ */
+ if (!IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP))
+ return false;
+
+ return is_power_of_2(sizeof(struct page));
+}
+
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
@@ -207,6 +230,10 @@ DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
*/
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
+	/* Fake heads only exist if compound_info_has_mask() is true */
+ if (!compound_info_has_mask())
+ return page;
+
if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
return page;
@@ -223,10 +250,14 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
* because the @page is a compound page composed with at least
* two contiguous pages.
*/
- unsigned long head = READ_ONCE(page[1].compound_info);
+ unsigned long info = READ_ONCE(page[1].compound_info);
+
+ /* See set_compound_head() */
+ if (likely(info & 1)) {
+ unsigned long p = (unsigned long)page;
- if (likely(head & 1))
- return (const struct page *)(head - 1);
+ return (const struct page *)(p & info);
+ }
}
return page;
}
@@ -281,11 +312,26 @@ static __always_inline int page_is_fake_head(const struct page *page)
static __always_inline unsigned long _compound_head(const struct page *page)
{
- unsigned long head = READ_ONCE(page->compound_info);
+ unsigned long info = READ_ONCE(page->compound_info);
- if (unlikely(head & 1))
- return head - 1;
- return (unsigned long)page_fixed_fake_head(page);
+ /* Bit 0 encodes PageTail() */
+ if (!(info & 1))
+ return (unsigned long)page_fixed_fake_head(page);
+
+ /*
+ * If compound_info_has_mask() is false, the rest of compound_info is
+ * the pointer to the head page.
+ */
+ if (!compound_info_has_mask())
+ return info - 1;
+
+ /*
+ * If compound_info_has_mask() is true the rest of the info encodes
+ * the mask that converts the address of the tail page to the head page.
+ *
+ * No need to clear bit 0 in the mask as 'page' always has it clear.
+ */
+ return (unsigned long)page & info;
}
#define compound_head(page) ((typeof(page))_compound_head(page))
@@ -293,7 +339,26 @@ static __always_inline unsigned long _compound_head(const struct page *page)
static __always_inline void set_compound_head(struct page *tail,
const struct page *head, unsigned int order)
{
- WRITE_ONCE(tail->compound_info, (unsigned long)head + 1);
+ unsigned int shift;
+ unsigned long mask;
+
+ if (!compound_info_has_mask()) {
+ WRITE_ONCE(tail->compound_info, (unsigned long)head | 1);
+ return;
+ }
+
+ /*
+	 * If the size of struct page is a power of 2, bits [shift-1:0] of the
+ * virtual address of compound head are zero.
+ *
+ * Calculate mask that can be applied to the virtual address of
+ * the tail page to get address of the head page.
+ */
+ shift = order + order_base_2(sizeof(struct page));
+ mask = GENMASK(BITS_PER_LONG - 1, shift);
+
+ /* Bit 0 encodes PageTail() */
+ WRITE_ONCE(tail->compound_info, mask | 1);
}
static __always_inline void clear_compound_head(struct page *page)