| author | Hugh Dickins <hugh@veritas.com> | 2008-02-05 09:28:44 +0300 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-02-05 20:44:15 +0300 |
| commit | d3602444e1e3485890eea5f61366e19a287c00c4 (patch) | |
| tree | f27cca0ce7c113c6b8f15f77abc3124faea9e2c8 /mm/shmem.c | |
| parent | 27d54b398ec0edea0e7417f003171017300e0efc (diff) | |
| download | linux-d3602444e1e3485890eea5f61366e19a287c00c4.tar.xz | |
shmem_getpage return page locked
In the new aops, write_begin is supposed to return the page locked: though
I've seen no ill effects, that's been overlooked in the case of
shmem_write_begin, and should be fixed. Then shmem_write_end must unlock the
page: do so _after_ updating i_size, as we found to be important in other
filesystems (though since shmem pages don't go the usual writeback route, they
never suffered from that corruption).
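As a minimal sketch of the ordering being described, the block below shows a
->write_end that updates i_size while the page handed back locked by
->write_begin is still held, and only then unlocks, dirties and releases it.
The function name example_write_end is invented for illustration, and the
sketch assumes the 2.6.24-era page-cache API (page_cache_release and friends);
the real change is the shmem_write_end hunk in the diff further down.

```c
/*
 * Illustrative only: a ->write_end that follows the ordering described
 * above.  i_size is extended while the page (returned locked by
 * ->write_begin) is still locked; the page is then unlocked before being
 * dirtied and released.
 */
static int example_write_end(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned copied,
			     struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);	/* still under the page lock */

	unlock_page(page);
	set_page_dirty(page);
	page_cache_release(page);

	return copied;
}
```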
For shmem_write_begin to return the page locked, we need shmem_getpage to
return the page locked in the SGP_WRITE case as well as the SGP_CACHE case:
let's simplify the interface and return it locked even in the SGP_READ case.
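With the interface simplified this way, every caller of shmem_getpage becomes
responsible for unlocking the page it gets back, which is what the repeated
unlock_page additions in the diff below do. Here is a hedged sketch of that
caller-side pattern; example_read_page is invented for illustration, and only
shmem_getpage, unlock_page and SGP_READ are taken from the patch:

```c
/*
 * Illustrative only: the caller-side convention once shmem_getpage()
 * returns *pagep locked in every SGP_* mode.  Note that SGP_READ can
 * legitimately hand back no page at all (a hole), hence the NULL check
 * before unlocking -- the same check the patch adds at several sites.
 */
static int example_read_page(struct inode *inode, unsigned long index,
			     struct page **pagep)
{
	struct page *page = NULL;
	int error = shmem_getpage(inode, index, &page, SGP_READ, NULL);

	if (error)
		return error;
	if (page)
		unlock_page(page);
	*pagep = page;
	return 0;
}
```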
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/shmem.c')
-rw-r--r-- | mm/shmem.c | 22

1 file changed, 13 insertions(+), 9 deletions(-)
```diff
diff --git a/mm/shmem.c b/mm/shmem.c
index 20cefe16eafb..43d071922b81 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -729,6 +729,8 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 			(void) shmem_getpage(inode,
 					attr->ia_size>>PAGE_CACHE_SHIFT,
 					&page, SGP_READ, NULL);
+			if (page)
+				unlock_page(page);
 		}
 		/*
 		 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
@@ -1286,12 +1288,7 @@ repeat:
 			SetPageUptodate(filepage);
 	}
 done:
-	if (*pagep != filepage) {
-		*pagep = filepage;
-		if (sgp != SGP_CACHE)
-			unlock_page(filepage);
-
-	}
+	*pagep = filepage;
 	return 0;
 
 failed:
@@ -1469,12 +1466,13 @@ shmem_write_end(struct file *file, struct address_space *mapping,
 {
 	struct inode *inode = mapping->host;
 
+	if (pos + copied > inode->i_size)
+		i_size_write(inode, pos + copied);
+
+	unlock_page(page);
 	set_page_dirty(page);
 	page_cache_release(page);
 
-	if (pos+copied > inode->i_size)
-		i_size_write(inode, pos+copied);
-
 	return copied;
 }
 
@@ -1529,6 +1527,7 @@ shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t
 		if (err)
 			break;
 
+		unlock_page(page);
 		left = bytes;
 		if (PageHighMem(page)) {
 			volatile unsigned char dummy;
@@ -1610,6 +1609,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
 				desc->error = 0;
 			break;
 		}
+		if (page)
+			unlock_page(page);
 
 		/*
 		 * We must evaluate after, since reads (unlike writes)
@@ -1899,6 +1900,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 			iput(inode);
 			return error;
 		}
+		unlock_page(page);
 		inode->i_op = &shmem_symlink_inode_operations;
 		kaddr = kmap_atomic(page, KM_USER0);
 		memcpy(kaddr, symname, len);
@@ -1926,6 +1928,8 @@ static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
 	struct page *page = NULL;
 	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
 	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
+	if (page)
+		unlock_page(page);
 	return page;
 }
```