author		Lachlan McIlroy <lachlan@sgi.com>	2007-11-23 08:27:32 +0300
committer	Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>	2008-02-07 10:13:27 +0300
commit		98ce2b5b1bd6db9f8d510b4333757fa6b1efe131 (patch)
tree		d85791e0b40d9589fb4998070319b25b0700fa0d /fs/xfs/linux-2.6/xfs_buf.c
parent		bc58f9bb6be02a80b5f1f757b656c9affc07154f (diff)
download	linux-98ce2b5b1bd6db9f8d510b4333757fa6b1efe131.tar.xz
[XFS] 971186 Undo mod xfs-linux-melb:xfs-kern:29845a due to a regression
SGI-PV: 971596
SGI-Modid: xfs-linux-melb:xfs-kern:29902a
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
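What the revert restores, as read from the hunks below: for reads on a target whose
block size is at least the page size, _xfs_buf_lookup_pages() now leaves the buffer's
pages locked (tracked by b_locked), and xfs_buf_bio_end_io() unlocks each page as its
part of the I/O completes; buffers that do not take this path unlock all their pages
up front. The following user-space sketch models that hand-off. It is an illustration
only; the types and helper names here are invented stand-ins, not the kernel's
definitions.

    /*
     * User-space sketch of the page-locking hand-off restored below.
     * All names here are illustrative stand-ins, not kernel code.
     */
    #include <stdio.h>

    struct page { int locked; };

    struct buf {
        struct page *pages[8];
        int page_count;
        int b_locked;       /* pages held locked across read I/O */
        int io_remaining;
    };

    /* Lookup: for reads with blocksize >= page size, keep pages locked. */
    static void lookup_pages(struct buf *bp, int is_read, int big_blocks)
    {
        if (is_read && big_blocks) {
            bp->b_locked = 1;           /* completion path will unlock */
            return;
        }
        for (int i = 0; i < bp->page_count; i++)
            bp->pages[i]->locked = 0;   /* unlock immediately */
    }

    /* I/O completion: unlock each page, clear b_locked on the last one. */
    static void bio_end_io(struct buf *bp, struct page *pg)
    {
        if (bp->b_locked)
            pg->locked = 0;
        if (--bp->io_remaining == 0)
            bp->b_locked = 0;           /* mirrors _xfs_buf_ioend() */
    }

    int main(void)
    {
        struct page p0 = { 1 }, p1 = { 1 };
        struct buf bp = { { &p0, &p1 }, 2, 0, 2 };

        lookup_pages(&bp, 1 /* read */, 1 /* blocksize >= page size */);
        bio_end_io(&bp, &p0);
        bio_end_io(&bp, &p1);
        printf("b_locked=%d p0=%d p1=%d\n", bp.b_locked, p0.locked, p1.locked);
        return 0;
    }

Run as-is it prints b_locked=0 p0=0 p1=0: both pages end up unlocked only once the
last outstanding piece of I/O completes, which is the state machine the revert brings back.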
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	53
1 file changed, 48 insertions(+), 5 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index a7c7cb27fa5a..522cfaa70258 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -409,7 +409,6 @@ _xfs_buf_lookup_pages(
congestion_wait(WRITE, HZ/50);
goto retry;
}
- unlock_page(page);

XFS_STATS_INC(xb_page_found);
@@ -419,7 +418,10 @@ _xfs_buf_lookup_pages(
ASSERT(!PagePrivate(page));
if (!PageUptodate(page)) {
page_count--;
- if (blocksize < PAGE_CACHE_SIZE && !PagePrivate(page)) {
+ if (blocksize >= PAGE_CACHE_SIZE) {
+ if (flags & XBF_READ)
+ bp->b_locked = 1;
+ } else if (!PagePrivate(page)) {
if (test_page_region(page, offset, nbytes))
page_count++;
}
@@ -429,6 +431,11 @@ _xfs_buf_lookup_pages(
offset = 0;
}

+ if (!bp->b_locked) {
+ for (i = 0; i < bp->b_page_count; i++)
+ unlock_page(bp->b_pages[i]);
+ }
+
if (page_count == bp->b_page_count)
bp->b_flags |= XBF_DONE;
@@ -745,6 +752,7 @@ xfs_buf_associate_memory(
bp->b_pages[i] = mem_to_page((void *)pageaddr);
pageaddr += PAGE_CACHE_SIZE;
}
+ bp->b_locked = 0;

bp->b_count_desired = len;
bp->b_buffer_length = buflen;
@@ -1091,13 +1099,25 @@ xfs_buf_iostart(
return status;
}

+STATIC_INLINE int
+_xfs_buf_iolocked(
+ xfs_buf_t *bp)
+{
+ ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
+ if (bp->b_flags & XBF_READ)
+ return bp->b_locked;
+ return 0;
+}
+
STATIC_INLINE void
_xfs_buf_ioend(
xfs_buf_t *bp,
int schedule)
{
- if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
+ if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
+ bp->b_locked = 0;
xfs_buf_ioend(bp, schedule);
+ }
}

STATIC void
@@ -1128,6 +1148,10 @@ xfs_buf_bio_end_io(
if (--bvec >= bio->bi_io_vec)
prefetchw(&bvec->bv_page->flags);
+
+ if (_xfs_buf_iolocked(bp)) {
+ unlock_page(page);
+ }
} while (bvec >= bio->bi_io_vec);

_xfs_buf_ioend(bp, 1);
@@ -1138,12 +1162,13 @@ STATIC void
_xfs_buf_ioapply(
xfs_buf_t *bp)
{
- int rw, map_i, total_nr_pages, nr_pages;
+ int i, rw, map_i, total_nr_pages, nr_pages;
struct bio *bio;
int offset = bp->b_offset;
int size = bp->b_count_desired;
sector_t sector = bp->b_bn;
unsigned int blocksize = bp->b_target->bt_bsize;
+ int locking = _xfs_buf_iolocked(bp);

total_nr_pages = bp->b_page_count;
map_i = 0;
@@ -1166,7 +1191,7 @@ _xfs_buf_ioapply(
* filesystem block size is not smaller than the page size.
*/
if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
- (bp->b_flags & XBF_READ) &&
+ (bp->b_flags & XBF_READ) && locking &&
(blocksize >= PAGE_CACHE_SIZE)) {
bio = bio_alloc(GFP_NOIO, 1);
@@ -1183,6 +1208,24 @@ _xfs_buf_ioapply(
goto submit_io;
}

+ /* Lock down the pages which we need to for the request */
+ if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
+ for (i = 0; size; i++) {
+ int nbytes = PAGE_CACHE_SIZE - offset;
+ struct page *page = bp->b_pages[i];
+
+ if (nbytes > size)
+ nbytes = size;
+
+ lock_page(page);
+
+ size -= nbytes;
+ offset = 0;
+ }
+ offset = bp->b_offset;
+ size = bp->b_count_desired;
+ }
+
next_chunk:
atomic_inc(&bp->b_io_remaining);
nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
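On the write side, the lock-down loop added in the last hunk above walks the request
one page at a time; only the first page can start at a non-zero offset, so offset is
reset to 0 after the first iteration. A minimal stand-alone sketch of that walk, with
PAGE_SZ and the printf as stand-ins for PAGE_CACHE_SIZE and lock_page():

    #include <stdio.h>

    #define PAGE_SZ 4096    /* stand-in for PAGE_CACHE_SIZE */

    /* Walk a request of `size' bytes starting `offset' bytes into page 0. */
    static void lock_request_pages(int offset, int size)
    {
        for (int i = 0; size; i++) {
            int nbytes = PAGE_SZ - offset;  /* bytes of the request in page i */

            if (nbytes > size)
                nbytes = size;

            printf("lock page %d (%d bytes)\n", i, nbytes); /* lock_page() stand-in */

            size -= nbytes;
            offset = 0;     /* pages after the first start at offset 0 */
        }
    }

    int main(void)
    {
        /* e.g. a 10000-byte request starting 1000 bytes into its first page */
        lock_request_pages(1000, 10000);
        return 0;
    }

For that example the walk locks three pages (3096, 4096 and 2808 bytes of the request
respectively), after which _xfs_buf_ioapply() restores offset and size and carries on
building bios for the chunks.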