Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h  38
1 file changed, 7 insertions(+), 31 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 866a3dbe5c75..c61ba10768ea 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -7,6 +7,7 @@
#include <linux/gfp.h>
#include <linux/list.h>
+#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
@@ -73,7 +74,7 @@ extern unsigned int kobjsize(const void *objp);
#endif
/*
- * vm_flags..
+ * vm_flags in vm_area_struct, see mm_types.h.
*/
#define VM_READ 0x00000001 /* currently active flags */
#define VM_WRITE 0x00000002
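[Illustrative note, not part of this diff] vm_flags is a plain bitmask in struct vm_area_struct, so callers test individual VM_* bits with bitwise AND. A minimal sketch, assuming kernel context and a hypothetical helper name:

/* Sketch only: report whether a mapping permits writes. */
static inline int vma_allows_write(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_WRITE) != 0;
}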
@@ -219,12 +220,6 @@ struct inode;
*/
#include <linux/page-flags.h>
-#ifdef CONFIG_DEBUG_VM
-#define VM_BUG_ON(cond) BUG_ON(cond)
-#else
-#define VM_BUG_ON(condition) do { } while(0)
-#endif
-
/*
* Methods to modify the page usage count.
*
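[Illustrative note, not part of this diff] The VM_BUG_ON() definition removed here appears to move to <linux/mmdebug.h>, which the first hunk adds to the include list, so existing callers keep compiling unchanged. A minimal sketch of typical usage, with a hypothetical helper name:

/* Sketch only: expands to BUG_ON(cond) with CONFIG_DEBUG_VM, no-op otherwise. */
static inline void assert_page_locked(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
}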
@@ -744,6 +739,8 @@ struct zap_details {
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
+int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size);
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather **tlb,
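[Illustrative example, not part of this diff] The new zap_vma_ptes() takes the same (vma, address, size) triple as zap_page_range() and gives drivers a way to tear down page-table entries they established inside one of their own VMAs. A hedged sketch, with a hypothetical driver callback name:

/* Sketch only: drop every PTE the driver set up in this VMA;
 * the int return value of zap_vma_ptes() is ignored here. */
static void my_drv_vma_close(struct vm_area_struct *vma)
{
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}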
@@ -832,7 +829,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
struct vm_area_struct **pprev, unsigned long start,
unsigned long end, unsigned long newflags);
-#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST
/*
* get_user_pages_fast provides equivalent functionality to get_user_pages,
* operating on current and current->mm (force=0 and doesn't return any vmas).
@@ -846,25 +842,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
-#else
-/*
- * Should probably be moved to asm-generic, and architectures can include it if
- * they don't implement their own get_user_pages_fast.
- */
-#define get_user_pages_fast(start, nr_pages, write, pages) \
-({ \
- struct mm_struct *mm = current->mm; \
- int ret; \
- \
- down_read(&mm->mmap_sem); \
- ret = get_user_pages(current, mm, start, nr_pages, \
- write, 0, pages, NULL); \
- up_read(&mm->mmap_sem); \
- \
- ret; \
-})
-#endif
-
/*
* A callback you can register to apply pressure to ageable caches.
*
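[Illustrative example, not part of this diff] With the #else fallback removed, get_user_pages_fast() is expected to be available on every architecture (presumably via a generic implementation that still falls back to get_user_pages internally). A hedged usage sketch, with hypothetical names:

/* Sketch only: pin nr_pages of user memory, use them, drop the references. */
static int with_pinned_pages(unsigned long uaddr, int nr_pages,
			     struct page **pages)
{
	int i, got;

	got = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);
	if (got <= 0)
		return got ? got : -EFAULT;

	/* ... access the pinned pages here ... */

	for (i = 0; i < got; i++)
		put_page(pages[i]);
	return 0;
}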
@@ -937,7 +914,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
/*
* We tuck a spinlock to guard each pagetable page into its struct page,
* at page->private, with BUILD_BUG_ON to make sure that this will not
@@ -950,14 +927,14 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
} while (0)
#define pte_lock_deinit(page) ((page)->mapping = NULL)
#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
-#else
+#else /* !USE_SPLIT_PTLOCKS */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
*/
#define pte_lock_init(page) do {} while (0)
#define pte_lock_deinit(page) do {} while (0)
#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* USE_SPLIT_PTLOCKS */
static inline void pgtable_page_ctor(struct page *page)
{
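[Illustrative note, not part of this diff] Whichever branch of USE_SPLIT_PTLOCKS is taken, pte_lockptr() hides the choice between the per-page spinlock and mm->page_table_lock, so callers look the same either way. A hedged sketch using the pte_offset_map_lock()/pte_unmap_unlock() helpers that mm.h builds on top of pte_lockptr() (the function name is hypothetical):

/* Sketch only: read one PTE under the appropriate page-table lock. */
static pte_t read_pte_locked(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *ptep, pte;

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	pte = *ptep;
	pte_unmap_unlock(ptep, ptl);
	return pte;
}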
@@ -1041,7 +1018,6 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn,
extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
-extern unsigned long find_max_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn);
typedef int (*work_fn_t)(unsigned long, unsigned long, void *);