author	Andy Whitcroft <apw@shadowen.org>	2007-05-07 01:49:14 +0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 23:12:52 +0400
commit	14e072984179d3d421bf9ab75cc67e0961742841 (patch)
tree	65a5a6f7d9756b8e7010278b58908d04da257a28
parent	ac267728f13c55017ed5ee243c9c3166e27ab929 (diff)
download	linux-14e072984179d3d421bf9ab75cc67e0961742841.tar.xz
add pfn_valid_within helper for sub-MAX_ORDER hole detection
Generally we work under the assumption that the memory the mem_map array describes is contiguous and valid out to a MAX_ORDER_NR_PAGES block of pages, i.e. that if we have validated any page within this MAX_ORDER_NR_PAGES block we need not check any other. This is not true when CONFIG_HOLES_IN_ZONE is set, and we must check each and every reference we make from a pfn.

Add a pfn_valid_within() helper which should be used when scanning pages within a MAX_ORDER_NR_PAGES block when we have already checked the validity of the block normally with pfn_valid(). This can then be optimised away when we do not have holes within a MAX_ORDER_NR_PAGES block of pages.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Bob Picco <bob.picco@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
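As an illustration only (not part of this commit), a minimal sketch of the intended calling pattern, using a hypothetical scan_block() helper: the MAX_ORDER_NR_PAGES block is validated once with pfn_valid(), and each page inside it is then checked only with pfn_valid_within(), which compiles away to a constant unless CONFIG_HOLES_IN_ZONE is set.

/* Hypothetical example, not part of this patch: scanning one block. */
static void scan_block(unsigned long start_pfn)
{
	unsigned long pfn;

	/* Validate the MAX_ORDER_NR_PAGES block once, the normal way. */
	if (!pfn_valid(start_pfn))
		return;

	for (pfn = start_pfn; pfn < start_pfn + MAX_ORDER_NR_PAGES; pfn++) {
		/* Per-page check; a no-op unless CONFIG_HOLES_IN_ZONE is set. */
		if (!pfn_valid_within(pfn))
			continue;
		/* ... operate on pfn_to_page(pfn) ... */
	}
}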
-rw-r--r--	include/linux/mmzone.h	12
-rw-r--r--	mm/page_alloc.c	8
2 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ee9e3143df4f..2f1544e83042 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -784,6 +784,18 @@ void sparse_init(void);
void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
+/*
+ * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
+ * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
+ * pfn_valid_within() should be used in this case; we optimise this away
+ * when we have no holes within a MAX_ORDER_NR_PAGES block.
+ */
+#ifdef CONFIG_HOLES_IN_ZONE
+#define pfn_valid_within(pfn) pfn_valid(pfn)
+#else
+#define pfn_valid_within(pfn) (1)
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 019ceda6a8b6..f564717d22f3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -156,10 +156,8 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
static int page_is_consistent(struct zone *zone, struct page *page)
{
-#ifdef CONFIG_HOLES_IN_ZONE
- if (!pfn_valid(page_to_pfn(page)))
+ if (!pfn_valid_within(page_to_pfn(page)))
return 0;
-#endif
if (zone != page_zone(page))
return 0;
@@ -346,10 +344,8 @@ __find_combined_index(unsigned long page_idx, unsigned int order)
static inline int page_is_buddy(struct page *page, struct page *buddy,
int order)
{
-#ifdef CONFIG_HOLES_IN_ZONE
- if (!pfn_valid(page_to_pfn(buddy)))
+ if (!pfn_valid_within(page_to_pfn(buddy)))
return 0;
-#endif
if (page_zone_id(page) != page_zone_id(buddy))
return 0;
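A note on the design (assumed reasoning, not stated in the commit message): because pfn_valid_within() expands to the constant (1) when CONFIG_HOLES_IN_ZONE is unset, the validity check becomes dead code that the compiler removes, so the #ifdef blocks can be dropped without adding any cost on configurations without holes. Roughly, the preprocessed check in page_is_buddy() then reduces to:

	/* pfn_valid_within(page_to_pfn(buddy)) with CONFIG_HOLES_IN_ZONE unset */
	if (!(1))
		return 0;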