path: root/include/linux/gfp.h
author     Jiri Kosina <jkosina@suse.cz>  2011-02-15 12:24:31 +0300
committer  Jiri Kosina <jkosina@suse.cz>  2011-02-15 12:24:31 +0300
commit     0a9d59a2461477bd9ed143c01af9df3f8f00fa81 (patch)
tree       df997d1cfb0786427a0df1fbd6f0640fa4248cf4 /include/linux/gfp.h
parent     a23ce6da9677d245aa0aadc99f4197030350ab54 (diff)
parent     795abaf1e4e188c4171e3cd3dbb11a9fcacaf505 (diff)
download   linux-0a9d59a2461477bd9ed143c01af9df3f8f00fa81.tar.xz
Merge branch 'master' into for-next
Diffstat (limited to 'include/linux/gfp.h')
-rw-r--r--  include/linux/gfp.h  17
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f54adfcbec9c..0b84c61607e8 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -34,6 +34,7 @@ struct vm_area_struct;
#else
#define ___GFP_NOTRACK 0
#endif
+#define ___GFP_NO_KSWAPD 0x400000u
/*
* GFP bitmasks..
@@ -81,13 +82,15 @@ struct vm_area_struct;
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
+#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
+
/*
* This may seem redundant, but it's a way of annotating false positives vs.
* allocations that simply cannot be supported (e.g. page tables).
*/
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-#define __GFP_BITS_SHIFT 22 /* Room for 22 __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 23 /* Room for 23 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/* This equals 0, but use constants in case they ever change */
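
Note: ___GFP_NO_KSWAPD is 0x400000u, i.e. bit 22, which is why __GFP_BITS_SHIFT grows from 22 to 23 in the hunk above. A minimal compile-time sanity check along these lines would catch a mismatch; this is an illustrative sketch, not part of the commit:

	#include <linux/gfp.h>
	#include <linux/kernel.h>

	/* Bit 22 is now taken by ___GFP_NO_KSWAPD, so the gfp mask must
	 * cover at least 23 bits. */
	static inline void gfp_bits_sanity_check(void)
	{
		BUILD_BUG_ON(___GFP_NO_KSWAPD != (1u << 22));
		BUILD_BUG_ON(__GFP_BITS_SHIFT < 23);
	}
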
@@ -106,6 +109,9 @@ struct vm_area_struct;
__GFP_HARDWALL | __GFP_HIGHMEM | \
__GFP_MOVABLE)
#define GFP_IOFS (__GFP_IO | __GFP_FS)
+#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
+ __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
+ __GFP_NO_KSWAPD)
#ifdef CONFIG_NUMA
#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
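
Note: the new GFP_TRANSHUGE composite bundles the flags a transparent hugepage allocation wants: highmem, movable, compound, no kswapd wakeup, no hard retry, no allocation-failure warning. A hedged usage sketch; HPAGE_PMD_ORDER and the helper name are assumptions for illustration, not taken from this diff:

	#include <linux/gfp.h>
	#include <linux/huge_mm.h>	/* assumed to provide HPAGE_PMD_ORDER */

	/* Illustrative only: try to grab a PMD-sized compound page without
	 * waking kswapd; callers fall back to small pages when this
	 * returns NULL. */
	static struct page *try_alloc_thp_page(void)
	{
		return alloc_pages(GFP_TRANSHUGE, HPAGE_PMD_ORDER);
	}
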
@@ -243,7 +249,7 @@ static inline enum zone_type gfp_zone(gfp_t flags)
((1 << ZONES_SHIFT) - 1);
if (__builtin_constant_p(bit))
- MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+ BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
else {
#ifdef CONFIG_DEBUG_VM
BUG_ON((GFP_ZONE_BAD >> bit) & 1);
@@ -325,14 +331,17 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
{
return alloc_pages_current(gfp_mask, order);
}
-extern struct page *alloc_page_vma(gfp_t gfp_mask,
+extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
+#define alloc_pages_vma(gfp_mask, order, vma, addr) \
+ alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
+#define alloc_page_vma(gfp_mask, vma, addr) \
+ alloc_pages_vma(gfp_mask, 0, vma, addr)
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
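
Note: after this change alloc_page_vma() is a zero-order wrapper around the new alloc_pages_vma(), which takes an explicit order and still applies the vma's NUMA policy. A hedged caller sketch; the helper name and gfp choices are illustrative assumptions, not code from this commit:

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Illustrative only: order 0 keeps the old single-page helper,
	 * higher orders go through alloc_pages_vma() directly. */
	static struct page *alloc_fault_pages(struct vm_area_struct *vma,
					      unsigned long addr, int order)
	{
		if (!order)
			return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
		return alloc_pages_vma(GFP_TRANSHUGE, order, vma, addr);
	}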