authorChristoph Lameter <clameter@sgi.com>2006-01-08 12:00:53 +0300
committerLinus Torvalds <torvalds@g5.osdl.org>2006-01-09 07:12:42 +0300
commit1480a540c98525640174a7eadd712378fcd6fd63 (patch)
tree28f2cc0aa819ff0aed30dd85bb16177d4a73002b /mm
parent8419c3181086c86664e8246bc997afc2e4ffba4f (diff)
[PATCH] SwapMig: add_to_swap() avoid atomic allocations
Add gfp_mask to add_to_swap().

add_to_swap() does its allocations with GFP_ATOMIC in order not to interfere with swapping. During migration we may have to use add_to_swap() extensively, which may lead to out-of-memory errors.

This patch makes add_to_swap() take a parameter that specifies the gfp mask. The page migration code can then make add_to_swap() use GFP_KERNEL.

Signed-off-by: Hirokazu Takahashi <taka@valinux.co.jp>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
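A minimal sketch (not part of the patch itself) of how callers pick the gfp mask after this change. The add_to_swap() prototype matches the patched mm/swap_state.c below; the two wrapper functions are hypothetical, for illustration only:

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/gfp.h>

/* New prototype introduced by this patch (see mm/swap_state.c below). */
int add_to_swap(struct page *page, gfp_t gfp_mask);

/* Reclaim path (shrink_list): must not sleep, so GFP_ATOMIC is kept. */
static int reclaim_add_to_swap(struct page *page)	/* hypothetical */
{
	return add_to_swap(page, GFP_ATOMIC);
}

/* Migration path: allowed to sleep, so GFP_KERNEL avoids spurious
 * out-of-memory failures when many pages are added to swap in a row. */
static int migrate_add_to_swap(struct page *page)	/* hypothetical */
{
	return add_to_swap(page, GFP_KERNEL);
}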
Diffstat (limited to 'mm')
-rw-r--r--	mm/swap_state.c	4
-rw-r--r--	mm/vmscan.c	4
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index fc2aecb70a95..7b09ac503fec 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -141,7 +141,7 @@ void __delete_from_swap_cache(struct page *page)
* Allocate swap space for the page and add the page to the
* swap cache. Caller needs to hold the page lock.
*/
-int add_to_swap(struct page * page)
+int add_to_swap(struct page * page, gfp_t gfp_mask)
{
swp_entry_t entry;
int err;
@@ -166,7 +166,7 @@ int add_to_swap(struct page * page)
* Add it to the swap cache and mark it dirty
*/
err = __add_to_swap_cache(page, entry,
- GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN);
+ gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);
switch (err) {
case 0: /* Success */
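Note that the __GFP_NOMEMALLOC and __GFP_NOWARN modifiers remain OR'ed in regardless of the caller-supplied mask: __GFP_NOMEMALLOC keeps the swap-cache allocation away from the emergency memory reserves that swap-out itself may need, and __GFP_NOWARN suppresses the allocation-failure warning, since a failed add_to_swap() is handled by the caller. Only the base mask (GFP_ATOMIC vs. GFP_KERNEL) becomes caller-selectable.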
diff --git a/mm/vmscan.c b/mm/vmscan.c
index daed4a73b761..5393b093a87b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -458,7 +458,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
* Try to allocate it some swap space here.
*/
if (PageAnon(page) && !PageSwapCache(page)) {
- if (!add_to_swap(page))
+ if (!add_to_swap(page, GFP_ATOMIC))
goto activate_locked;
}
#endif /* CONFIG_SWAP */
@@ -715,7 +715,7 @@ redo:
}
if (PageAnon(page) && !PageSwapCache(page)) {
- if (!add_to_swap(page)) {
+ if (!add_to_swap(page, GFP_KERNEL)) {
unlock_page(page);
list_move(&page->lru, &failed);
nr_failed++;