author    Zach Brown <zach.brown@oracle.com>	2006-06-25 16:46:46 +0400
committer Linus Torvalds <torvalds@g5.osdl.org>	2006-06-25 21:00:54 +0400
commit    9f1a3cfcffaed2fbb3206179295c79ca8289f5c3 (patch)
tree      d7adeab100ff8e2fe0b64fa5b2c9ef09ec60c842 /mm
parent    09a9a45dc62fef5f46a0dc98a3cefdb464cc4aaa (diff)
download  linux-9f1a3cfcffaed2fbb3206179295c79ca8289f5c3.tar.xz
[PATCH] AOP_TRUNCATED_PAGE victims in read_pages() belong in the LRU
Nick Piggin rightly pointed out that the introduction of AOP_TRUNCATED_PAGE
to read_pages() was wrong to leave A_T_P victim pages in the page cache but
not put them in the LRU.  Failing to do so hid them from the VM.

A_T_P just means that the aop method unlocked the page rather than performing
IO.  It would be very rare that the page was truncated between the unlock and
testing A_T_P.  So we leave the pages in the LRU for likely reuse soon rather
than backing them back out of the page cache.  We do this by matching the
behaviour before the A_T_P introduction, which added pages to the LRU
regardless of what ->readpage() did.

This doesn't include the unrelated cleanup in Nick's initial fix which changed
read_pages() to return void to match its only caller's behaviour of ignoring
errors.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Zach Brown <zach.brown@oracle.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/readahead.c	13
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index 0f142a40984b..4ee52cadab93 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -182,14 +182,11 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 		list_del(&page->lru);
 		if (!add_to_page_cache(page, mapping,
 					page->index, GFP_KERNEL)) {
-			ret = mapping->a_ops->readpage(filp, page);
-			if (ret != AOP_TRUNCATED_PAGE) {
-				if (!pagevec_add(&lru_pvec, page))
-					__pagevec_lru_add(&lru_pvec);
-				continue;
-			} /* else fall through to release */
-		}
-		page_cache_release(page);
+			mapping->a_ops->readpage(filp, page);
+			if (!pagevec_add(&lru_pvec, page))
+				__pagevec_lru_add(&lru_pvec);
+		} else
+			page_cache_release(page);
 	}
 	pagevec_lru_add(&lru_pvec);
 	ret = 0;
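
For context, here is a minimal sketch of how the patched loop sits inside
read_pages() in 2.6.17-era mm/readahead.c.  The surrounding pieces (the
->readpages() fast path, the lru_pvec declaration, the out: label, and the
file-local list_to_page() helper) are reproduced from memory of that era and
are abridged; they should be checked against the actual tree rather than
taken as the exact upstream source.

	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <linux/pagemap.h>
	#include <linux/pagevec.h>

	/* File-local helper as used in readahead.c of that era (assumption). */
	#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

	static int read_pages(struct address_space *mapping, struct file *filp,
			struct list_head *pages, unsigned nr_pages)
	{
		unsigned page_idx;
		struct pagevec lru_pvec;
		int ret;

		/* Filesystems providing ->readpages() take the batched path. */
		if (mapping->a_ops->readpages) {
			ret = mapping->a_ops->readpages(filp, mapping, pages,
							nr_pages);
			goto out;
		}

		pagevec_init(&lru_pvec, 0);
		for (page_idx = 0; page_idx < nr_pages; page_idx++) {
			struct page *page = list_to_page(pages);
			list_del(&page->lru);
			if (!add_to_page_cache(page, mapping,
						page->index, GFP_KERNEL)) {
				/*
				 * ->readpage() may return AOP_TRUNCATED_PAGE,
				 * meaning it only unlocked the page instead of
				 * starting IO.  Either way the page is in the
				 * page cache, so add it to the LRU where the
				 * VM can see it, rather than releasing it.
				 */
				mapping->a_ops->readpage(filp, page);
				if (!pagevec_add(&lru_pvec, page))
					__pagevec_lru_add(&lru_pvec);
			} else
				page_cache_release(page);
		}
		pagevec_lru_add(&lru_pvec);
		ret = 0;
	out:
		return ret;
	}

The design point of the patch is visible here: once add_to_page_cache()
succeeds, the page goes onto the LRU unconditionally, matching the behaviour
before AOP_TRUNCATED_PAGE was introduced.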