Diffstat (limited to 'fs/nilfs2/inode.c')
-rw-r--r--  fs/nilfs2/inode.c | 70
1 file changed, 24 insertions, 46 deletions
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index be6acf6e2bfc..6613b8fcceb0 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -68,6 +68,8 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n)
*
* This function does not issue actual read request of the specified data
* block. It is done by VFS.
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_get_block(struct inode *inode, sector_t blkoff,
struct buffer_head *bh_result, int create)
@@ -141,6 +143,8 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
* address_space_operations.
* @file: file struct of the file to be read
* @folio: the folio to be read
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_read_folio(struct file *file, struct folio *folio)
{
@@ -170,37 +174,6 @@ static int nilfs_writepages(struct address_space *mapping,
return err;
}
-static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct folio *folio = page_folio(page);
- struct inode *inode = folio->mapping->host;
- int err;
-
- if (sb_rdonly(inode->i_sb)) {
- /*
- * It means that filesystem was remounted in read-only
- * mode because of error or metadata corruption. But we
- * have dirty pages that try to be flushed in background.
- * So, here we simply discard this dirty page.
- */
- nilfs_clear_folio_dirty(folio);
- folio_unlock(folio);
- return -EROFS;
- }
-
- folio_redirty_for_writepage(wbc, folio);
- folio_unlock(folio);
-
- if (wbc->sync_mode == WB_SYNC_ALL) {
- err = nilfs_construct_segment(inode->i_sb);
- if (unlikely(err))
- return err;
- } else if (wbc->for_reclaim)
- nilfs_flush_segment(inode->i_sb, inode->i_ino);
-
- return 0;
-}
-
static bool nilfs_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
@@ -273,7 +246,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
unsigned int nr_dirty;
int err;
- nr_dirty = nilfs_page_count_clean_buffers(&folio->page, start,
+ nr_dirty = nilfs_page_count_clean_buffers(folio, start,
start + copied);
copied = generic_write_end(file, mapping, pos, len, copied, folio,
fsdata);
@@ -295,7 +268,6 @@ nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
}
const struct address_space_operations nilfs_aops = {
- .writepage = nilfs_writepage,
.read_folio = nilfs_read_folio,
.writepages = nilfs_writepages,
.dirty_folio = nilfs_dirty_folio,
@@ -304,9 +276,14 @@ const struct address_space_operations nilfs_aops = {
.write_end = nilfs_write_end,
.invalidate_folio = block_invalidate_folio,
.direct_IO = nilfs_direct_IO,
+ .migrate_folio = buffer_migrate_folio_norefs,
.is_partially_uptodate = block_is_partially_uptodate,
};
+const struct address_space_operations nilfs_buffer_cache_aops = {
+ .invalidate_folio = block_invalidate_folio,
+};
+
static int nilfs_insert_inode_locked(struct inode *inode,
struct nilfs_root *root,
unsigned long ino)
@@ -575,8 +552,14 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
inode = nilfs_iget_locked(sb, root, ino);
if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+
+ if (!(inode->i_state & I_NEW)) {
+ if (!inode->i_nlink) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
return inode;
+ }
err = __nilfs_read_inode(sb, root, ino, inode);
if (unlikely(err)) {
@@ -619,10 +602,7 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
* or does nothing if the inode already has it. This function allocates
* an additional inode to maintain page cache of B-tree nodes one-on-one.
*
- * Return Value: On success, 0 is returned. On errors, one of the following
- * negative error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: 0 on success, or %-ENOMEM if memory is insufficient.
*/
int nilfs_attach_btree_node_cache(struct inode *inode)
{
@@ -681,11 +661,8 @@ void nilfs_detach_btree_node_cache(struct inode *inode)
* in one inode and the one for b-tree node pages is set up in the
* other inode, which is attached to the former inode.
*
- * Return Value: On success, a pointer to the inode for data pages is
- * returned. On errors, one of the following negative error code is returned
- * in a pointer type.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: a pointer to the inode for data pages on success, or %-ENOMEM
+ * if memory is insufficient.
*/
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
@@ -706,6 +683,7 @@ struct inode *nilfs_iget_for_shadow(struct inode *inode)
NILFS_I(s_inode)->i_flags = 0;
memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
+ s_inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;
err = nilfs_attach_btree_node_cache(s_inode);
if (unlikely(err)) {
@@ -1208,7 +1186,7 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (size) {
if (phys && blkphy << blkbits == phys + size) {
/* The current extent goes on */
- size += n << blkbits;
+ size += (u64)n << blkbits;
} else {
/* Terminate the current extent */
ret = fiemap_fill_next_extent(
@@ -1221,14 +1199,14 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
flags = FIEMAP_EXTENT_MERGED;
logical = blkoff << blkbits;
phys = blkphy << blkbits;
- size = n << blkbits;
+ size = (u64)n << blkbits;
}
} else {
/* Start a new extent */
flags = FIEMAP_EXTENT_MERGED;
logical = blkoff << blkbits;
phys = blkphy << blkbits;
- size = n << blkbits;
+ size = (u64)n << blkbits;
}
blkoff += n;
}
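
Note on the last two hunks: the (u64) casts widen the shift in the extent-size arithmetic. Assuming n (the contiguous block count returned by the bmap lookup) is declared with a 32-bit type in nilfs_fiemap, which the diff itself does not show, the expression n << blkbits is evaluated in 32-bit arithmetic before being assigned to the 64-bit size, so a sufficiently large extent loses its high bits; casting n first performs the shift at full width. A minimal standalone sketch of the effect, with illustrative values and types rather than the kernel's actual declarations:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative values: a 3,000,000-block extent with 4 KiB blocks. */
	unsigned int n = 3000000;      /* contiguous block count (assumed 32-bit) */
	unsigned int blkbits = 12;     /* block size shift for 4096-byte blocks   */
	uint64_t size;

	size = n << blkbits;           /* shift evaluated in 32 bits: high bits lost */
	printf("without cast: %llu\n", (unsigned long long)size);

	size = (uint64_t)n << blkbits; /* shift evaluated in 64 bits: 12288000000   */
	printf("with cast:    %llu\n", (unsigned long long)size);

	return 0;
}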