author    Kirill A. Shutemov <kirill.shutemov@linux.intel.com>  2013-11-15 02:31:51 +0400
committer Linus Torvalds <torvalds@linux-foundation.org>        2013-11-15 04:32:20 +0400
commit    49076ec2ccaf68610aa03d96bced9a6694b93ca1 (patch)
tree      876564edb5cd164c7f9eaf39008f8f01fb164db6 /include/linux/mm.h
parent    f820e2805c7acb157a78438d07e47f4fc57fe679 (diff)
download  linux-49076ec2ccaf68610aa03d96bced9a6694b93ca1.tar.xz
mm: dynamically allocate page->ptl if it cannot be embedded into struct page
If the split page table lock is in use, we embed the lock into the struct page of the table's page. We have to disable the split lock if spinlock_t is too big to be embedded, as when DEBUG_SPINLOCK or DEBUG_LOCK_ALLOC is enabled.

This patch adds support for dynamic allocation of the split page table lock when we can't embed it into struct page. page->ptl is now an unsigned long: we use it as a spinlock_t directly if sizeof(spinlock_t) <= sizeof(long), otherwise as a pointer to a spinlock_t. The spinlock_t is allocated in pgtable_page_ctor() for PTE tables and in pgtable_pmd_page_ctor() for PMD tables. All other helpers are converted to support the dynamically allocated page->ptl.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
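The header hunk below only declares __ptlock_alloc() and __ptlock_free(); their bodies are not part of this diff (in the tree they land outside the header, in mm/memory.c). A minimal sketch of what the out-of-line path amounts to, assuming a plain kmalloc()/kfree() of the lock:

bool __ptlock_alloc(struct page *page)
{
	spinlock_t *ptl;

	/* Lock is too big for page->ptl: allocate it separately. */
	ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!ptl)
		return false;
	/* Stash the pointer in the unsigned long field. */
	page->ptl = (unsigned long)ptl;
	return true;
}

void __ptlock_free(struct page *page)
{
	kfree((spinlock_t *)page->ptl);
}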
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h | 81
1 file changed, 61 insertions(+), 20 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e855351c3361..d0339741b6ce 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1317,32 +1317,73 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
#if USE_SPLIT_PTE_PTLOCKS
-/*
- * We tuck a spinlock to guard each pagetable page into its struct page,
- * at page->private, with BUILD_BUG_ON to make sure that this will not
- * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
- * When freeing, reset page->mapping so free_pages_check won't complain.
- */
-#define __pte_lockptr(page) &((page)->ptl)
-#define pte_lock_init(_page) do { \
- spin_lock_init(__pte_lockptr(_page)); \
-} while (0)
-#define pte_lock_deinit(page) ((page)->mapping = NULL)
-#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
+bool __ptlock_alloc(struct page *page);
+void __ptlock_free(struct page *page);
+static inline bool ptlock_alloc(struct page *page)
+{
+ if (sizeof(spinlock_t) > sizeof(page->ptl))
+ return __ptlock_alloc(page);
+ return true;
+}
+static inline void ptlock_free(struct page *page)
+{
+ if (sizeof(spinlock_t) > sizeof(page->ptl))
+ __ptlock_free(page);
+}
+
+static inline spinlock_t *ptlock_ptr(struct page *page)
+{
+ if (sizeof(spinlock_t) > sizeof(page->ptl))
+ return (spinlock_t *) page->ptl;
+ else
+ return (spinlock_t *) &page->ptl;
+}
+
+static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
+{
+ return ptlock_ptr(pmd_page(*pmd));
+}
+
+static inline bool ptlock_init(struct page *page)
+{
+ /*
+ * prep_new_page() initializes page->private (and therefore page->ptl)
+ * with 0. Make sure nobody took it into use in between.
+ *
+ * It can happen if an arch tries to use slab for page table allocation:
+ * slab code uses page->slab_cache and page->first_page (for tail
+ * pages), which share storage with page->ptl.
+ */
+ VM_BUG_ON(page->ptl);
+ if (!ptlock_alloc(page))
+ return false;
+ spin_lock_init(ptlock_ptr(page));
+ return true;
+}
+
+/* Reset page->mapping so free_pages_check won't complain. */
+static inline void pte_lock_deinit(struct page *page)
+{
+ page->mapping = NULL;
+ ptlock_free(page);
+}
+
#else /* !USE_SPLIT_PTE_PTLOCKS */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
*/
-#define pte_lock_init(page) do {} while (0)
-#define pte_lock_deinit(page) do {} while (0)
-#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
+static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
+{
+ return &mm->page_table_lock;
+}
+static inline bool ptlock_init(struct page *page) { return true; }
+static inline void pte_lock_deinit(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */
static inline bool pgtable_page_ctor(struct page *page)
{
- pte_lock_init(page);
inc_zone_page_state(page, NR_PAGETABLE);
- return true;
+ return ptlock_init(page);
}
static inline void pgtable_page_dtor(struct page *page)
@@ -1383,16 +1424,15 @@ static inline void pgtable_page_dtor(struct page *page)
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
- return &virt_to_page(pmd)->ptl;
+ return ptlock_ptr(virt_to_page(pmd));
}
static inline bool pgtable_pmd_page_ctor(struct page *page)
{
- spin_lock_init(&page->ptl);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
page->pmd_huge_pte = NULL;
#endif
- return true;
+ return ptlock_init(page);
}
static inline void pgtable_pmd_page_dtor(struct page *page)
@@ -1400,6 +1440,7 @@ static inline void pgtable_pmd_page_dtor(struct page *page)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
VM_BUG_ON(page->pmd_huge_pte);
#endif
+ ptlock_free(page);
}
#define pmd_huge_pte(mm, pmd) (virt_to_page(pmd)->pmd_huge_pte)
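
For context, nothing outside these helpers touches page->ptl directly: callers resolve the lock through pte_lockptr()/pmd_lockptr(), so they stay oblivious to whether the lock is embedded or dynamically allocated. A sketch of the usual consumer pattern, modeled on the pte_offset_map_lock()/pte_unmap_unlock() pair found elsewhere in this header (shown here simplified, not verbatim):

	/* Resolve the per-table lock, then take it around PTE access. */
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *pte = pte_offset_map(pmd, address);

	spin_lock(ptl);
	/* ... inspect or modify the PTE ... */
	spin_unlock(ptl);
	pte_unmap(pte);

Because the lookup is funneled through one helper, the embed-vs-allocate decision stays a private detail of ptlock_ptr() and never leaks into callers.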