Diffstat (limited to 'fs'): 313 files changed, 9310 insertions(+), 7713 deletions(-)
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index 055562c580b4..9ff073f4090a 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -148,13 +148,14 @@ static int v9fs_release_page(struct page *page, gfp_t gfp) * @offset: offset in the page */ -static void v9fs_invalidate_page(struct page *page, unsigned long offset) +static void v9fs_invalidate_page(struct page *page, unsigned int offset, + unsigned int length) { /* * If called with zero offset, we should release * the private state assocated with the page */ - if (offset == 0) + if (offset == 0 && length == PAGE_CACHE_SIZE) v9fs_fscache_invalidate_page(page); } diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c index be1e34adc3c6..4d0c2e0be7e5 100644 --- a/fs/9p/vfs_dir.c +++ b/fs/9p/vfs_dir.c @@ -101,16 +101,15 @@ static struct p9_rdir *v9fs_alloc_rdir_buf(struct file *filp, int buflen) } /** - * v9fs_dir_readdir - read a directory - * @filp: opened file structure - * @dirent: directory structure ??? - * @filldir: function to populate directory structure ??? + * v9fs_dir_readdir - iterate through a directory + * @file: opened file structure + * @ctx: actor we feed the entries to * */ -static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx) { - int over; + bool over; struct p9_wstat st; int err = 0; struct p9_fid *fid; @@ -118,19 +117,19 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir) int reclen = 0; struct p9_rdir *rdir; - p9_debug(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name); - fid = filp->private_data; + p9_debug(P9_DEBUG_VFS, "name %s\n", file->f_path.dentry->d_name.name); + fid = file->private_data; buflen = fid->clnt->msize - P9_IOHDRSZ; - rdir = v9fs_alloc_rdir_buf(filp, buflen); + rdir = v9fs_alloc_rdir_buf(file, buflen); if (!rdir) return -ENOMEM; while (1) { if (rdir->tail == rdir->head) { - err = v9fs_file_readn(filp, rdir->buf, NULL, - buflen, filp->f_pos); + err = v9fs_file_readn(file, rdir->buf, NULL, + buflen, ctx->pos); if (err <= 0) return err; @@ -148,51 +147,45 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir) } reclen = st.size+2; - over = filldir(dirent, st.name, strlen(st.name), - filp->f_pos, v9fs_qid2ino(&st.qid), dt_type(&st)); - + over = !dir_emit(ctx, st.name, strlen(st.name), + v9fs_qid2ino(&st.qid), dt_type(&st)); p9stat_free(&st); - if (over) return 0; rdir->head += reclen; - filp->f_pos += reclen; + ctx->pos += reclen; } } } /** - * v9fs_dir_readdir_dotl - read a directory - * @filp: opened file structure - * @dirent: buffer to fill dirent structures - * @filldir: function to populate dirent structures + * v9fs_dir_readdir_dotl - iterate through a directory + * @file: opened file structure + * @ctx: actor we feed the entries to * */ -static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent, - filldir_t filldir) +static int v9fs_dir_readdir_dotl(struct file *file, struct dir_context *ctx) { - int over; int err = 0; struct p9_fid *fid; int buflen; struct p9_rdir *rdir; struct p9_dirent curdirent; - u64 oldoffset = 0; - p9_debug(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name); - fid = filp->private_data; + p9_debug(P9_DEBUG_VFS, "name %s\n", file->f_path.dentry->d_name.name); + fid = file->private_data; buflen = fid->clnt->msize - P9_READDIRHDRSZ; - rdir = v9fs_alloc_rdir_buf(filp, buflen); + rdir = v9fs_alloc_rdir_buf(file, buflen); if (!rdir) return -ENOMEM; while (1) { if (rdir->tail == rdir->head) { err = 
p9_client_readdir(fid, rdir->buf, buflen, - filp->f_pos); + ctx->pos); if (err <= 0) return err; @@ -210,22 +203,13 @@ static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent, return -EIO; } - /* d_off in dirent structure tracks the offset into - * the next dirent in the dir. However, filldir() - * expects offset into the current dirent. Hence - * while calling filldir send the offset from the - * previous dirent structure. - */ - over = filldir(dirent, curdirent.d_name, - strlen(curdirent.d_name), - oldoffset, v9fs_qid2ino(&curdirent.qid), - curdirent.d_type); - oldoffset = curdirent.d_off; - - if (over) + if (!dir_emit(ctx, curdirent.d_name, + strlen(curdirent.d_name), + v9fs_qid2ino(&curdirent.qid), + curdirent.d_type)) return 0; - filp->f_pos = curdirent.d_off; + ctx->pos = curdirent.d_off; rdir->head += err; } } @@ -254,7 +238,7 @@ int v9fs_dir_release(struct inode *inode, struct file *filp) const struct file_operations v9fs_dir_operations = { .read = generic_read_dir, .llseek = generic_file_llseek, - .readdir = v9fs_dir_readdir, + .iterate = v9fs_dir_readdir, .open = v9fs_file_open, .release = v9fs_dir_release, }; @@ -262,7 +246,7 @@ const struct file_operations v9fs_dir_operations = { const struct file_operations v9fs_dir_operations_dotl = { .read = generic_read_dir, .llseek = generic_file_llseek, - .readdir = v9fs_dir_readdir_dotl, + .iterate = v9fs_dir_readdir_dotl, .open = v9fs_file_open, .release = v9fs_dir_release, .fsync = v9fs_file_fsync_dotl, diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c index 9cf874ce8336..0d138c0de293 100644 --- a/fs/adfs/dir.c +++ b/fs/adfs/dir.c @@ -17,47 +17,43 @@ static DEFINE_RWLOCK(adfs_dir_lock); static int -adfs_readdir(struct file *filp, void *dirent, filldir_t filldir) +adfs_readdir(struct file *file, struct dir_context *ctx) { - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir; struct object_info obj; struct adfs_dir dir; int ret = 0; - if (filp->f_pos >> 32) - goto out; + if (ctx->pos >> 32) + return 0; ret = ops->read(sb, inode->i_ino, inode->i_size, &dir); if (ret) - goto out; + return ret; - switch ((unsigned long)filp->f_pos) { - case 0: - if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) + if (ctx->pos == 0) { + if (!dir_emit_dot(file, ctx)) goto free_out; - filp->f_pos += 1; - - case 1: - if (filldir(dirent, "..", 2, 1, dir.parent_id, DT_DIR) < 0) + ctx->pos = 1; + } + if (ctx->pos == 1) { + if (!dir_emit(ctx, "..", 2, dir.parent_id, DT_DIR)) goto free_out; - filp->f_pos += 1; - - default: - break; + ctx->pos = 2; } read_lock(&adfs_dir_lock); - ret = ops->setpos(&dir, filp->f_pos - 2); + ret = ops->setpos(&dir, ctx->pos - 2); if (ret) goto unlock_out; while (ops->getnext(&dir, &obj) == 0) { - if (filldir(dirent, obj.name, obj.name_len, - filp->f_pos, obj.file_id, DT_UNKNOWN) < 0) - goto unlock_out; - filp->f_pos += 1; + if (!dir_emit(ctx, obj.name, obj.name_len, + obj.file_id, DT_UNKNOWN)) + break; + ctx->pos++; } unlock_out: @@ -65,8 +61,6 @@ unlock_out: free_out: ops->free(&dir); - -out: return ret; } @@ -192,13 +186,12 @@ out: const struct file_operations adfs_dir_operations = { .read = generic_read_dir, .llseek = generic_file_llseek, - .readdir = adfs_readdir, + .iterate = adfs_readdir, .fsync = generic_file_fsync, }; static int -adfs_hash(const struct dentry *parent, const struct inode *inode, - struct qstr *qstr) +adfs_hash(const struct dentry *parent, struct qstr *qstr) { const unsigned int name_len = 
ADFS_SB(parent->d_sb)->s_namelen; const unsigned char *name; @@ -234,8 +227,7 @@ adfs_hash(const struct dentry *parent, const struct inode *inode, * requirements of the underlying filesystem. */ static int -adfs_compare(const struct dentry *parent, const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +adfs_compare(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { int i; diff --git a/fs/affs/dir.c b/fs/affs/dir.c index fd11a6d608ee..f1eba8c3644e 100644 --- a/fs/affs/dir.c +++ b/fs/affs/dir.c @@ -15,12 +15,12 @@ #include "affs.h" -static int affs_readdir(struct file *, void *, filldir_t); +static int affs_readdir(struct file *, struct dir_context *); const struct file_operations affs_dir_operations = { .read = generic_read_dir, .llseek = generic_file_llseek, - .readdir = affs_readdir, + .iterate = affs_readdir, .fsync = affs_file_fsync, }; @@ -40,52 +40,35 @@ const struct inode_operations affs_dir_inode_operations = { }; static int -affs_readdir(struct file *filp, void *dirent, filldir_t filldir) +affs_readdir(struct file *file, struct dir_context *ctx) { - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; - struct buffer_head *dir_bh; - struct buffer_head *fh_bh; + struct buffer_head *dir_bh = NULL; + struct buffer_head *fh_bh = NULL; unsigned char *name; int namelen; u32 i; int hash_pos; int chain_pos; - u32 f_pos; u32 ino; - int stored; - int res; - pr_debug("AFFS: readdir(ino=%lu,f_pos=%lx)\n",inode->i_ino,(unsigned long)filp->f_pos); + pr_debug("AFFS: readdir(ino=%lu,f_pos=%lx)\n",inode->i_ino,(unsigned long)ctx->pos); - stored = 0; - res = -EIO; - dir_bh = NULL; - fh_bh = NULL; - f_pos = filp->f_pos; - - if (f_pos == 0) { - filp->private_data = (void *)0; - if (filldir(dirent, ".", 1, f_pos, inode->i_ino, DT_DIR) < 0) + if (ctx->pos < 2) { + file->private_data = (void *)0; + if (!dir_emit_dots(file, ctx)) return 0; - filp->f_pos = f_pos = 1; - stored++; - } - if (f_pos == 1) { - if (filldir(dirent, "..", 2, f_pos, parent_ino(filp->f_path.dentry), DT_DIR) < 0) - return stored; - filp->f_pos = f_pos = 2; - stored++; } affs_lock_dir(inode); - chain_pos = (f_pos - 2) & 0xffff; - hash_pos = (f_pos - 2) >> 16; + chain_pos = (ctx->pos - 2) & 0xffff; + hash_pos = (ctx->pos - 2) >> 16; if (chain_pos == 0xffff) { affs_warning(sb, "readdir", "More than 65535 entries in chain"); chain_pos = 0; hash_pos++; - filp->f_pos = ((hash_pos << 16) | chain_pos) + 2; + ctx->pos = ((hash_pos << 16) | chain_pos) + 2; } dir_bh = affs_bread(sb, inode->i_ino); if (!dir_bh) @@ -94,8 +77,8 @@ affs_readdir(struct file *filp, void *dirent, filldir_t filldir) /* If the directory hasn't changed since the last call to readdir(), * we can jump directly to where we left off. 
*/ - ino = (u32)(long)filp->private_data; - if (ino && filp->f_version == inode->i_version) { + ino = (u32)(long)file->private_data; + if (ino && file->f_version == inode->i_version) { pr_debug("AFFS: readdir() left off=%d\n", ino); goto inside; } @@ -105,7 +88,7 @@ affs_readdir(struct file *filp, void *dirent, filldir_t filldir) fh_bh = affs_bread(sb, ino); if (!fh_bh) { affs_error(sb, "readdir","Cannot read block %d", i); - goto readdir_out; + return -EIO; } ino = be32_to_cpu(AFFS_TAIL(sb, fh_bh)->hash_chain); affs_brelse(fh_bh); @@ -119,38 +102,34 @@ affs_readdir(struct file *filp, void *dirent, filldir_t filldir) ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[hash_pos]); if (!ino) continue; - f_pos = (hash_pos << 16) + 2; + ctx->pos = (hash_pos << 16) + 2; inside: do { fh_bh = affs_bread(sb, ino); if (!fh_bh) { affs_error(sb, "readdir","Cannot read block %d", ino); - goto readdir_done; + break; } namelen = min(AFFS_TAIL(sb, fh_bh)->name[0], (u8)30); name = AFFS_TAIL(sb, fh_bh)->name + 1; pr_debug("AFFS: readdir(): filldir(\"%.*s\", ino=%u), hash=%d, f_pos=%x\n", - namelen, name, ino, hash_pos, f_pos); - if (filldir(dirent, name, namelen, f_pos, ino, DT_UNKNOWN) < 0) + namelen, name, ino, hash_pos, (u32)ctx->pos); + if (!dir_emit(ctx, name, namelen, ino, DT_UNKNOWN)) goto readdir_done; - stored++; - f_pos++; + ctx->pos++; ino = be32_to_cpu(AFFS_TAIL(sb, fh_bh)->hash_chain); affs_brelse(fh_bh); fh_bh = NULL; } while (ino); } readdir_done: - filp->f_pos = f_pos; - filp->f_version = inode->i_version; - filp->private_data = (void *)(long)ino; - res = stored; + file->f_version = inode->i_version; + file->private_data = (void *)(long)ino; readdir_out: affs_brelse(dir_bh); affs_brelse(fh_bh); affs_unlock_dir(inode); - pr_debug("AFFS: readdir()=%d\n", stored); - return res; + return 0; } diff --git a/fs/affs/namei.c b/fs/affs/namei.c index ff65884a7839..c36cbb4537a2 100644 --- a/fs/affs/namei.c +++ b/fs/affs/namei.c @@ -13,18 +13,12 @@ typedef int (*toupper_t)(int); static int affs_toupper(int ch); -static int affs_hash_dentry(const struct dentry *, - const struct inode *, struct qstr *); -static int affs_compare_dentry(const struct dentry *parent, - const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +static int affs_hash_dentry(const struct dentry *, struct qstr *); +static int affs_compare_dentry(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name); static int affs_intl_toupper(int ch); -static int affs_intl_hash_dentry(const struct dentry *, - const struct inode *, struct qstr *); -static int affs_intl_compare_dentry(const struct dentry *parent, - const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +static int affs_intl_hash_dentry(const struct dentry *, struct qstr *); +static int affs_intl_compare_dentry(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name); const struct dentry_operations affs_dentry_operations = { @@ -86,14 +80,12 @@ __affs_hash_dentry(struct qstr *qstr, toupper_t toupper) } static int -affs_hash_dentry(const struct dentry *dentry, const struct inode *inode, - struct qstr *qstr) +affs_hash_dentry(const struct dentry *dentry, struct qstr *qstr) { return __affs_hash_dentry(qstr, affs_toupper); } static int -affs_intl_hash_dentry(const struct dentry *dentry, const struct inode *inode, - struct qstr *qstr) +affs_intl_hash_dentry(const struct dentry *dentry, struct qstr *qstr) { return 
__affs_hash_dentry(qstr, affs_intl_toupper); } @@ -131,15 +123,13 @@ static inline int __affs_compare_dentry(unsigned int len, } static int -affs_compare_dentry(const struct dentry *parent, const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +affs_compare_dentry(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { return __affs_compare_dentry(len, str, name, affs_toupper); } static int -affs_intl_compare_dentry(const struct dentry *parent,const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +affs_intl_compare_dentry(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { return __affs_compare_dentry(len, str, name, affs_intl_toupper); diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 7a465ed04444..34494fbead0a 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -22,7 +22,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags); static int afs_dir_open(struct inode *inode, struct file *file); -static int afs_readdir(struct file *file, void *dirent, filldir_t filldir); +static int afs_readdir(struct file *file, struct dir_context *ctx); static int afs_d_revalidate(struct dentry *dentry, unsigned int flags); static int afs_d_delete(const struct dentry *dentry); static void afs_d_release(struct dentry *dentry); @@ -43,7 +43,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, const struct file_operations afs_dir_file_operations = { .open = afs_dir_open, .release = afs_release, - .readdir = afs_readdir, + .iterate = afs_readdir, .lock = afs_lock, .llseek = generic_file_llseek, }; @@ -119,9 +119,9 @@ struct afs_dir_page { }; struct afs_lookup_cookie { + struct dir_context ctx; struct afs_fid fid; - const char *name; - size_t nlen; + struct qstr name; int found; }; @@ -228,20 +228,18 @@ static int afs_dir_open(struct inode *inode, struct file *file) /* * deal with one block in an AFS directory */ -static int afs_dir_iterate_block(unsigned *fpos, +static int afs_dir_iterate_block(struct dir_context *ctx, union afs_dir_block *block, - unsigned blkoff, - void *cookie, - filldir_t filldir) + unsigned blkoff) { union afs_dirent *dire; unsigned offset, next, curr; size_t nlen; - int tmp, ret; + int tmp; - _enter("%u,%x,%p,,",*fpos,blkoff,block); + _enter("%u,%x,%p,,",(unsigned)ctx->pos,blkoff,block); - curr = (*fpos - blkoff) / sizeof(union afs_dirent); + curr = (ctx->pos - blkoff) / sizeof(union afs_dirent); /* walk through the block, an entry at a time */ for (offset = AFS_DIRENT_PER_BLOCK - block->pagehdr.nentries; @@ -256,7 +254,7 @@ static int afs_dir_iterate_block(unsigned *fpos, _debug("ENT[%Zu.%u]: unused", blkoff / sizeof(union afs_dir_block), offset); if (offset >= curr) - *fpos = blkoff + + ctx->pos = blkoff + next * sizeof(union afs_dirent); continue; } @@ -302,19 +300,15 @@ static int afs_dir_iterate_block(unsigned *fpos, continue; /* found the next entry */ - ret = filldir(cookie, - dire->u.name, - nlen, - blkoff + offset * sizeof(union afs_dirent), + if (!dir_emit(ctx, dire->u.name, nlen, ntohl(dire->u.vnode), - filldir == afs_lookup_filldir ? - ntohl(dire->u.unique) : DT_UNKNOWN); - if (ret < 0) { + ctx->actor == afs_lookup_filldir ? 
+ ntohl(dire->u.unique) : DT_UNKNOWN)) { _leave(" = 0 [full]"); return 0; } - *fpos = blkoff + next * sizeof(union afs_dirent); + ctx->pos = blkoff + next * sizeof(union afs_dirent); } _leave(" = 1 [more]"); @@ -324,8 +318,8 @@ static int afs_dir_iterate_block(unsigned *fpos, /* * iterate through the data blob that lists the contents of an AFS directory */ -static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie, - filldir_t filldir, struct key *key) +static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, + struct key *key) { union afs_dir_block *dblock; struct afs_dir_page *dbuf; @@ -333,7 +327,7 @@ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie, unsigned blkoff, limit; int ret; - _enter("{%lu},%u,,", dir->i_ino, *fpos); + _enter("{%lu},%u,,", dir->i_ino, (unsigned)ctx->pos); if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) { _leave(" = -ESTALE"); @@ -341,13 +335,13 @@ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie, } /* round the file position up to the next entry boundary */ - *fpos += sizeof(union afs_dirent) - 1; - *fpos &= ~(sizeof(union afs_dirent) - 1); + ctx->pos += sizeof(union afs_dirent) - 1; + ctx->pos &= ~(sizeof(union afs_dirent) - 1); /* walk through the blocks in sequence */ ret = 0; - while (*fpos < dir->i_size) { - blkoff = *fpos & ~(sizeof(union afs_dir_block) - 1); + while (ctx->pos < dir->i_size) { + blkoff = ctx->pos & ~(sizeof(union afs_dir_block) - 1); /* fetch the appropriate page from the directory */ page = afs_dir_get_page(dir, blkoff / PAGE_SIZE, key); @@ -364,8 +358,7 @@ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie, do { dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) / sizeof(union afs_dir_block)]; - ret = afs_dir_iterate_block(fpos, dblock, blkoff, - cookie, filldir); + ret = afs_dir_iterate_block(ctx, dblock, blkoff); if (ret != 1) { afs_dir_put_page(page); goto out; @@ -373,7 +366,7 @@ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie, blkoff += sizeof(union afs_dir_block); - } while (*fpos < dir->i_size && blkoff < limit); + } while (ctx->pos < dir->i_size && blkoff < limit); afs_dir_put_page(page); ret = 0; @@ -387,23 +380,10 @@ out: /* * read an AFS directory */ -static int afs_readdir(struct file *file, void *cookie, filldir_t filldir) +static int afs_readdir(struct file *file, struct dir_context *ctx) { - unsigned fpos; - int ret; - - _enter("{%Ld,{%lu}}", - file->f_pos, file_inode(file)->i_ino); - - ASSERT(file->private_data != NULL); - - fpos = file->f_pos; - ret = afs_dir_iterate(file_inode(file), &fpos, - cookie, filldir, file->private_data); - file->f_pos = fpos; - - _leave(" = %d", ret); - return ret; + return afs_dir_iterate(file_inode(file), + ctx, file->private_data); } /* @@ -416,15 +396,16 @@ static int afs_lookup_filldir(void *_cookie, const char *name, int nlen, { struct afs_lookup_cookie *cookie = _cookie; - _enter("{%s,%Zu},%s,%u,,%llu,%u", - cookie->name, cookie->nlen, name, nlen, + _enter("{%s,%u},%s,%u,,%llu,%u", + cookie->name.name, cookie->name.len, name, nlen, (unsigned long long) ino, dtype); /* insanity checks first */ BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048); BUILD_BUG_ON(sizeof(union afs_dirent) != 32); - if (cookie->nlen != nlen || memcmp(cookie->name, name, nlen) != 0) { + if (cookie->name.len != nlen || + memcmp(cookie->name.name, name, nlen) != 0) { _leave(" = 0 [no]"); return 0; } @@ -444,24 +425,18 @@ static int afs_lookup_filldir(void *_cookie, const char *name, 
int nlen, static int afs_do_lookup(struct inode *dir, struct dentry *dentry, struct afs_fid *fid, struct key *key) { - struct afs_lookup_cookie cookie; - struct afs_super_info *as; - unsigned fpos; + struct afs_super_info *as = dir->i_sb->s_fs_info; + struct afs_lookup_cookie cookie = { + .ctx.actor = afs_lookup_filldir, + .name = dentry->d_name, + .fid.vid = as->volume->vid + }; int ret; _enter("{%lu},%p{%s},", dir->i_ino, dentry, dentry->d_name.name); - as = dir->i_sb->s_fs_info; - /* search the directory */ - cookie.name = dentry->d_name.name; - cookie.nlen = dentry->d_name.len; - cookie.fid.vid = as->volume->vid; - cookie.found = 0; - - fpos = 0; - ret = afs_dir_iterate(dir, &fpos, &cookie, afs_lookup_filldir, - key); + ret = afs_dir_iterate(dir, &cookie.ctx, key); if (ret < 0) { _leave(" = %d [iter]", ret); return ret; diff --git a/fs/afs/file.c b/fs/afs/file.c index 8f6e9234d565..66d50fe2ee45 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -19,7 +19,8 @@ #include "internal.h" static int afs_readpage(struct file *file, struct page *page); -static void afs_invalidatepage(struct page *page, unsigned long offset); +static void afs_invalidatepage(struct page *page, unsigned int offset, + unsigned int length); static int afs_releasepage(struct page *page, gfp_t gfp_flags); static int afs_launder_page(struct page *page); @@ -310,16 +311,17 @@ static int afs_launder_page(struct page *page) * - release a page and clean up its private data if offset is 0 (indicating * the entire page) */ -static void afs_invalidatepage(struct page *page, unsigned long offset) +static void afs_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { struct afs_writeback *wb = (struct afs_writeback *) page_private(page); - _enter("{%lu},%lu", page->index, offset); + _enter("{%lu},%u,%u", page->index, offset, length); BUG_ON(!PageLocked(page)); /* we clean up only if the entire page is being invalidated */ - if (offset == 0) { + if (offset == 0 && length == PAGE_CACHE_SIZE) { #ifdef CONFIG_AFS_FSCACHE if (PageFsCache(page)) { struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 2497bf306c70..a8cf2cff836c 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -252,7 +252,8 @@ static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key) */ static int afs_do_setlk(struct file *file, struct file_lock *fl) { - struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host); + struct inode *inode = file_inode(file); + struct afs_vnode *vnode = AFS_FS_I(inode); afs_lock_type_t type; struct key *key = file->private_data; int ret; @@ -273,7 +274,7 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl) type = (fl->fl_type == F_RDLCK) ? 
AFS_LOCK_READ : AFS_LOCK_WRITE; - lock_flocks(); + spin_lock(&inode->i_lock); /* make sure we've got a callback on this file and that our view of the * data version is up to date */ @@ -420,7 +421,7 @@ given_lock: afs_vnode_fetch_status(vnode, NULL, key); error: - unlock_flocks(); + spin_unlock(&inode->i_lock); _leave(" = %d", ret); return ret; @@ -39,6 +39,8 @@ #include <asm/kmap_types.h> #include <asm/uaccess.h> +#include "internal.h" + #define AIO_RING_MAGIC 0xa10a10a1 #define AIO_RING_COMPAT_FEATURES 1 #define AIO_RING_INCOMPAT_FEATURES 0 @@ -141,9 +143,6 @@ static void aio_free_ring(struct kioctx *ctx) for (i = 0; i < ctx->nr_pages; i++) put_page(ctx->ring_pages[i]); - if (ctx->mmap_size) - vm_munmap(ctx->mmap_base, ctx->mmap_size); - if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) kfree(ctx->ring_pages); } @@ -322,11 +321,6 @@ static void free_ioctx(struct kioctx *ctx) aio_free_ring(ctx); - spin_lock(&aio_nr_lock); - BUG_ON(aio_nr - ctx->max_reqs > aio_nr); - aio_nr -= ctx->max_reqs; - spin_unlock(&aio_nr_lock); - pr_debug("freeing %p\n", ctx); /* @@ -435,17 +429,24 @@ static void kill_ioctx(struct kioctx *ctx) { if (!atomic_xchg(&ctx->dead, 1)) { hlist_del_rcu(&ctx->list); - /* Between hlist_del_rcu() and dropping the initial ref */ - synchronize_rcu(); /* - * We can't punt to workqueue here because put_ioctx() -> - * free_ioctx() will unmap the ringbuffer, and that has to be - * done in the original process's context. kill_ioctx_rcu/work() - * exist for exit_aio(), as in that path free_ioctx() won't do - * the unmap. + * It'd be more correct to do this in free_ioctx(), after all + * the outstanding kiocbs have finished - but by then io_destroy + * has already returned, so io_setup() could potentially return + * -EAGAIN with no ioctxs actually in use (as far as userspace + * could tell). */ - kill_ioctx_work(&ctx->rcu_work); + spin_lock(&aio_nr_lock); + BUG_ON(aio_nr - ctx->max_reqs > aio_nr); + aio_nr -= ctx->max_reqs; + spin_unlock(&aio_nr_lock); + + if (ctx->mmap_size) + vm_munmap(ctx->mmap_base, ctx->mmap_size); + + /* Between hlist_del_rcu() and dropping the initial ref */ + call_rcu(&ctx->rcu_head, kill_ioctx_rcu); } } @@ -495,10 +496,7 @@ void exit_aio(struct mm_struct *mm) */ ctx->mmap_size = 0; - if (!atomic_xchg(&ctx->dead, 1)) { - hlist_del_rcu(&ctx->list); - call_rcu(&ctx->rcu_head, kill_ioctx_rcu); - } + kill_ioctx(ctx); } } @@ -627,7 +625,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2) /* * Add a completion event to the ring buffer. Must be done holding - * ctx->ctx_lock to prevent other code from messing with the tail + * ctx->completion_lock to prevent other code from messing with the tail * pointer since we might be called from irq context. 
*/ spin_lock_irqsave(&ctx->completion_lock, flags); diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 085da86e07c2..ca8e55548d98 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -41,7 +41,7 @@ const struct file_operations autofs4_root_operations = { .open = dcache_dir_open, .release = dcache_dir_close, .read = generic_read_dir, - .readdir = dcache_readdir, + .iterate = dcache_readdir, .llseek = dcache_dir_lseek, .unlocked_ioctl = autofs4_root_ioctl, #ifdef CONFIG_COMPAT @@ -53,7 +53,7 @@ const struct file_operations autofs4_dir_operations = { .open = autofs4_dir_open, .release = dcache_dir_close, .read = generic_read_dir, - .readdir = dcache_readdir, + .iterate = dcache_readdir, .llseek = dcache_dir_lseek, }; diff --git a/fs/bad_inode.c b/fs/bad_inode.c index 922ad460bff9..7c93953030fb 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c @@ -45,7 +45,7 @@ static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov, return -EIO; } -static int bad_file_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int bad_file_readdir(struct file *file, struct dir_context *ctx) { return -EIO; } @@ -152,7 +152,7 @@ static const struct file_operations bad_file_ops = .write = bad_file_write, .aio_read = bad_file_aio_read, .aio_write = bad_file_aio_write, - .readdir = bad_file_readdir, + .iterate = bad_file_readdir, .poll = bad_file_poll, .unlocked_ioctl = bad_file_unlocked_ioctl, .compat_ioctl = bad_file_compat_ioctl, diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index f95dddced968..e9c75e20db32 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -31,7 +31,7 @@ MODULE_LICENSE("GPL"); /* The units the vfs expects inode->i_blocks to be in */ #define VFS_BLOCK_SIZE 512 -static int befs_readdir(struct file *, void *, filldir_t); +static int befs_readdir(struct file *, struct dir_context *); static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int); static int befs_readpage(struct file *file, struct page *page); static sector_t befs_bmap(struct address_space *mapping, sector_t block); @@ -66,7 +66,7 @@ static struct kmem_cache *befs_inode_cachep; static const struct file_operations befs_dir_operations = { .read = generic_read_dir, - .readdir = befs_readdir, + .iterate = befs_readdir, .llseek = generic_file_llseek, }; @@ -211,9 +211,9 @@ befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) } static int -befs_readdir(struct file *filp, void *dirent, filldir_t filldir) +befs_readdir(struct file *file, struct dir_context *ctx) { - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; befs_data_stream *ds = &BEFS_I(inode)->i_data.ds; befs_off_t value; @@ -221,15 +221,14 @@ befs_readdir(struct file *filp, void *dirent, filldir_t filldir) size_t keysize; unsigned char d_type; char keybuf[BEFS_NAME_LEN + 1]; - char *nlsname; - int nlsnamelen; - const char *dirname = filp->f_path.dentry->d_name.name; + const char *dirname = file->f_path.dentry->d_name.name; befs_debug(sb, "---> befs_readdir() " - "name %s, inode %ld, filp->f_pos %Ld", - dirname, inode->i_ino, filp->f_pos); + "name %s, inode %ld, ctx->pos %Ld", + dirname, inode->i_ino, ctx->pos); - result = befs_btree_read(sb, ds, filp->f_pos, BEFS_NAME_LEN + 1, +more: + result = befs_btree_read(sb, ds, ctx->pos, BEFS_NAME_LEN + 1, keybuf, &keysize, &value); if (result == BEFS_ERR) { @@ -251,24 +250,29 @@ befs_readdir(struct file *filp, void *dirent, filldir_t filldir) /* Convert to NLS */ if 
(BEFS_SB(sb)->nls) { + char *nlsname; + int nlsnamelen; result = befs_utf2nls(sb, keybuf, keysize, &nlsname, &nlsnamelen); if (result < 0) { befs_debug(sb, "<--- befs_readdir() ERROR"); return result; } - result = filldir(dirent, nlsname, nlsnamelen, filp->f_pos, - (ino_t) value, d_type); + if (!dir_emit(ctx, nlsname, nlsnamelen, + (ino_t) value, d_type)) { + kfree(nlsname); + return 0; + } kfree(nlsname); - } else { - result = filldir(dirent, keybuf, keysize, filp->f_pos, - (ino_t) value, d_type); + if (!dir_emit(ctx, keybuf, keysize, + (ino_t) value, d_type)) + return 0; } - if (!result) - filp->f_pos++; + ctx->pos++; + goto more; - befs_debug(sb, "<--- befs_readdir() filp->f_pos %Ld", filp->f_pos); + befs_debug(sb, "<--- befs_readdir() pos %Ld", ctx->pos); return 0; } diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c index 3f422f6bb5ca..a399e6d9dc74 100644 --- a/fs/bfs/dir.c +++ b/fs/bfs/dir.c @@ -26,58 +26,51 @@ static struct buffer_head *bfs_find_entry(struct inode *dir, const unsigned char *name, int namelen, struct bfs_dirent **res_dir); -static int bfs_readdir(struct file *f, void *dirent, filldir_t filldir) +static int bfs_readdir(struct file *f, struct dir_context *ctx) { struct inode *dir = file_inode(f); struct buffer_head *bh; struct bfs_dirent *de; - struct bfs_sb_info *info = BFS_SB(dir->i_sb); unsigned int offset; int block; - mutex_lock(&info->bfs_lock); - - if (f->f_pos & (BFS_DIRENT_SIZE - 1)) { + if (ctx->pos & (BFS_DIRENT_SIZE - 1)) { printf("Bad f_pos=%08lx for %s:%08lx\n", - (unsigned long)f->f_pos, + (unsigned long)ctx->pos, dir->i_sb->s_id, dir->i_ino); - mutex_unlock(&info->bfs_lock); - return -EBADF; + return -EINVAL; } - while (f->f_pos < dir->i_size) { - offset = f->f_pos & (BFS_BSIZE - 1); - block = BFS_I(dir)->i_sblock + (f->f_pos >> BFS_BSIZE_BITS); + while (ctx->pos < dir->i_size) { + offset = ctx->pos & (BFS_BSIZE - 1); + block = BFS_I(dir)->i_sblock + (ctx->pos >> BFS_BSIZE_BITS); bh = sb_bread(dir->i_sb, block); if (!bh) { - f->f_pos += BFS_BSIZE - offset; + ctx->pos += BFS_BSIZE - offset; continue; } do { de = (struct bfs_dirent *)(bh->b_data + offset); if (de->ino) { int size = strnlen(de->name, BFS_NAMELEN); - if (filldir(dirent, de->name, size, f->f_pos, + if (!dir_emit(ctx, de->name, size, le16_to_cpu(de->ino), - DT_UNKNOWN) < 0) { + DT_UNKNOWN)) { brelse(bh); - mutex_unlock(&info->bfs_lock); return 0; } } offset += BFS_DIRENT_SIZE; - f->f_pos += BFS_DIRENT_SIZE; - } while ((offset < BFS_BSIZE) && (f->f_pos < dir->i_size)); + ctx->pos += BFS_DIRENT_SIZE; + } while ((offset < BFS_BSIZE) && (ctx->pos < dir->i_size)); brelse(bh); } - - mutex_unlock(&info->bfs_lock); - return 0; + return 0; } const struct file_operations bfs_dir_operations = { .read = generic_read_dir, - .readdir = bfs_readdir, + .iterate = bfs_readdir, .fsync = generic_file_fsync, .llseek = generic_file_llseek, }; diff --git a/fs/block_dev.c b/fs/block_dev.c index 2091db8cdd78..bb43ce081d6e 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -325,31 +325,10 @@ static int blkdev_write_end(struct file *file, struct address_space *mapping, static loff_t block_llseek(struct file *file, loff_t offset, int whence) { struct inode *bd_inode = file->f_mapping->host; - loff_t size; loff_t retval; mutex_lock(&bd_inode->i_mutex); - size = i_size_read(bd_inode); - - retval = -EINVAL; - switch (whence) { - case SEEK_END: - offset += size; - break; - case SEEK_CUR: - offset += file->f_pos; - case SEEK_SET: - break; - default: - goto out; - } - if (offset >= 0 && offset <= size) { - if (offset != file->f_pos) 
{ - file->f_pos = offset; - } - retval = offset; - } -out: + retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode)); mutex_unlock(&bd_inode->i_mutex); return retval; } @@ -1583,6 +1562,7 @@ static const struct address_space_operations def_blk_aops = { .writepages = generic_writepages, .releasepage = blkdev_releasepage, .direct_IO = blkdev_direct_IO, + .is_dirty_writeback = buffer_check_dirty_writeback, }; const struct file_operations def_blk_fops = { diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 02fae7f7e42c..17dffe33e8d0 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -4430,7 +4430,7 @@ void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path, } /* - * make the item pointed to by the path bigger, data_size is the new size. + * make the item pointed to by the path bigger, data_size is the added size. */ void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path, u32 data_size) diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index f26f38ccd194..eb34438ddedb 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1681,8 +1681,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list, * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree * */ -int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, - filldir_t filldir, +int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, struct list_head *ins_list) { struct btrfs_dir_item *di; @@ -1704,13 +1703,13 @@ int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, list_for_each_entry_safe(curr, next, ins_list, readdir_list) { list_del(&curr->readdir_list); - if (curr->key.offset < filp->f_pos) { + if (curr->key.offset < ctx->pos) { if (atomic_dec_and_test(&curr->refs)) kfree(curr); continue; } - filp->f_pos = curr->key.offset; + ctx->pos = curr->key.offset; di = (struct btrfs_dir_item *)curr->data; name = (char *)(di + 1); @@ -1719,7 +1718,7 @@ int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, d_type = btrfs_filetype_table[di->type]; btrfs_disk_key_to_cpu(&location, &di->location); - over = filldir(dirent, name, name_len, curr->key.offset, + over = !dir_emit(ctx, name, name_len, location.objectid, d_type); if (atomic_dec_and_test(&curr->refs)) diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index 1d5c5f7abe3e..a4b38f934d14 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h @@ -139,8 +139,7 @@ void btrfs_put_delayed_items(struct list_head *ins_list, struct list_head *del_list); int btrfs_should_delete_dir_index(struct list_head *del_list, u64 index); -int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, - filldir_t filldir, +int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, struct list_head *ins_list); /* for init */ diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e7b3cb5286a5..b0292b3ead54 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1013,7 +1013,8 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags) return try_release_extent_buffer(page); } -static void btree_invalidatepage(struct page *page, unsigned long offset) +static void btree_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { struct extent_io_tree *tree; tree = &BTRFS_I(page->mapping->host)->io_tree; @@ -2859,8 +2860,8 @@ fail_qgroup: btrfs_free_qgroup_config(fs_info); fail_trans_kthread: kthread_stop(fs_info->transaction_kthread); - del_fs_roots(fs_info); 
btrfs_cleanup_transaction(fs_info->tree_root); + del_fs_roots(fs_info); fail_cleaner: kthread_stop(fs_info->cleaner_kthread); @@ -3512,15 +3513,15 @@ int close_ctree(struct btrfs_root *root) percpu_counter_sum(&fs_info->delalloc_bytes)); } - free_root_pointers(fs_info, 1); - btrfs_free_block_groups(fs_info); + btrfs_stop_all_workers(fs_info); + del_fs_roots(fs_info); - iput(fs_info->btree_inode); + free_root_pointers(fs_info, 1); - btrfs_stop_all_workers(fs_info); + iput(fs_info->btree_inode); #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY if (btrfs_test_opt(root, CHECK_INTEGRITY)) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e7e7afb4a872..6bca9472f313 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2957,7 +2957,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, pg_offset = i_size & (PAGE_CACHE_SIZE - 1); if (page->index > end_index || (page->index == end_index && !pg_offset)) { - page->mapping->a_ops->invalidatepage(page, 0); + page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); unlock_page(page); return 0; } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 4205ba752d40..89da56a58b63 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2425,20 +2425,7 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence) } } - if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) { - offset = -EINVAL; - goto out; - } - if (offset > inode->i_sb->s_maxbytes) { - offset = -EINVAL; - goto out; - } - - /* Special lock needed here? */ - if (offset != file->f_pos) { - file->f_pos = offset; - file->f_version = 0; - } + offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); out: mutex_unlock(&inode->i_mutex); return offset; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index e53009657f0e..2750b5023526 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -3200,7 +3200,7 @@ static int test_extents(struct btrfs_block_group_cache *cache) ret = btrfs_remove_free_space(cache, 2 * 1024 * 1024, 4096); if (ret) { - printk(KERN_ERR "Error removing middle peice %d\n", ret); + printk(KERN_ERR "Error removing middle piece %d\n", ret); return ret; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index af978f7682b3..4f9d16b70d3d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5137,10 +5137,9 @@ unsigned char btrfs_filetype_table[] = { DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK }; -static int btrfs_real_readdir(struct file *filp, void *dirent, - filldir_t filldir) +static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) { - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_item *item; struct btrfs_dir_item *di; @@ -5161,29 +5160,15 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, char tmp_name[32]; char *name_ptr; int name_len; - int is_curr = 0; /* filp->f_pos points to the current index? */ + int is_curr = 0; /* ctx->pos points to the current index? */ /* FIXME, use a real flag for deciding about the key type */ if (root->fs_info->tree_root == root) key_type = BTRFS_DIR_ITEM_KEY; - /* special case for "." 
*/ - if (filp->f_pos == 0) { - over = filldir(dirent, ".", 1, - filp->f_pos, btrfs_ino(inode), DT_DIR); - if (over) - return 0; - filp->f_pos = 1; - } - /* special case for .., just use the back ref */ - if (filp->f_pos == 1) { - u64 pino = parent_ino(filp->f_path.dentry); - over = filldir(dirent, "..", 2, - filp->f_pos, pino, DT_DIR); - if (over) - return 0; - filp->f_pos = 2; - } + if (!dir_emit_dots(file, ctx)) + return 0; + path = btrfs_alloc_path(); if (!path) return -ENOMEM; @@ -5197,7 +5182,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, } btrfs_set_key_type(&key, key_type); - key.offset = filp->f_pos; + key.offset = ctx->pos; key.objectid = btrfs_ino(inode); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); @@ -5223,14 +5208,14 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, break; if (btrfs_key_type(&found_key) != key_type) break; - if (found_key.offset < filp->f_pos) + if (found_key.offset < ctx->pos) goto next; if (key_type == BTRFS_DIR_INDEX_KEY && btrfs_should_delete_dir_index(&del_list, found_key.offset)) goto next; - filp->f_pos = found_key.offset; + ctx->pos = found_key.offset; is_curr = 1; di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); @@ -5274,9 +5259,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, over = 0; goto skip; } - over = filldir(dirent, name_ptr, name_len, - found_key.offset, location.objectid, - d_type); + over = !dir_emit(ctx, name_ptr, name_len, + location.objectid, d_type); skip: if (name_ptr != tmp_name) @@ -5295,9 +5279,8 @@ next: if (key_type == BTRFS_DIR_INDEX_KEY) { if (is_curr) - filp->f_pos++; - ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir, - &ins_list); + ctx->pos++; + ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); if (ret) goto nopos; } @@ -5308,9 +5291,9 @@ next: * 32-bit glibc will use getdents64, but then strtol - * so the last number we can serve is this. 
*/ - filp->f_pos = 0x7fffffff; + ctx->pos = 0x7fffffff; else - filp->f_pos++; + ctx->pos++; nopos: ret = 0; err: @@ -7510,7 +7493,8 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) return __btrfs_releasepage(page, gfp_flags & GFP_NOFS); } -static void btrfs_invalidatepage(struct page *page, unsigned long offset) +static void btrfs_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { struct inode *inode = page->mapping->host; struct extent_io_tree *tree; @@ -8012,6 +7996,9 @@ int btrfs_drop_inode(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; + if (root == NULL) + return 1; + /* the snap/subvol tree is on deleting */ if (btrfs_root_refs(&root->root_item) == 0 && root != root->fs_info->tree_root) @@ -8728,7 +8715,7 @@ static const struct inode_operations btrfs_dir_ro_inode_operations = { static const struct file_operations btrfs_dir_file_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = btrfs_real_readdir, + .iterate = btrfs_real_readdir, .unlocked_ioctl = btrfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = btrfs_ioctl, diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 0f81d67cdc8d..cd7e96c73cb7 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3881,7 +3881,7 @@ drop_write: static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg) { - struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root; + struct btrfs_root *root = BTRFS_I(file_inode(file))->root; struct btrfs_ioctl_quota_rescan_args *qsa; int ret; @@ -3914,7 +3914,7 @@ drop_write: static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg) { - struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root; + struct btrfs_root *root = BTRFS_I(file_inode(file))->root; struct btrfs_ioctl_quota_rescan_args *qsa; int ret = 0; @@ -4020,7 +4020,7 @@ out: static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg) { - struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root; + struct btrfs_root *root = BTRFS_I(file_inode(file))->root; const char *label = root->fs_info->super_copy->label; size_t len = strnlen(label, BTRFS_LABEL_SIZE); int ret; @@ -4039,7 +4039,7 @@ static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg) static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg) { - struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root; + struct btrfs_root *root = BTRFS_I(file_inode(file))->root; struct btrfs_super_block *super_block = root->fs_info->super_copy; struct btrfs_trans_handle *trans; char label[BTRFS_LABEL_SIZE]; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 395b82031a42..4febca4fc2de 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -4082,7 +4082,7 @@ out: return inode; } -static struct reloc_control *alloc_reloc_control(void) +static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) { struct reloc_control *rc; @@ -4093,7 +4093,8 @@ static struct reloc_control *alloc_reloc_control(void) INIT_LIST_HEAD(&rc->reloc_roots); backref_cache_init(&rc->backref_cache); mapping_tree_init(&rc->reloc_root_tree); - extent_io_tree_init(&rc->processed_blocks, NULL); + extent_io_tree_init(&rc->processed_blocks, + fs_info->btree_inode->i_mapping); return rc; } @@ -4110,7 +4111,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) int rw = 0; int err = 0; - rc = alloc_reloc_control(); + rc = alloc_reloc_control(fs_info); if (!rc) return -ENOMEM; @@ 
-4311,7 +4312,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) if (list_empty(&reloc_roots)) goto out; - rc = alloc_reloc_control(); + rc = alloc_reloc_control(root->fs_info); if (!rc) { err = -ENOMEM; goto out; diff --git a/fs/buffer.c b/fs/buffer.c index d2a4d1bb2d57..4d7433534f5c 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -83,6 +83,40 @@ void unlock_buffer(struct buffer_head *bh) EXPORT_SYMBOL(unlock_buffer); /* + * Returns if the page has dirty or writeback buffers. If all the buffers + * are unlocked and clean then the PageDirty information is stale. If + * any of the pages are locked, it is assumed they are locked for IO. + */ +void buffer_check_dirty_writeback(struct page *page, + bool *dirty, bool *writeback) +{ + struct buffer_head *head, *bh; + *dirty = false; + *writeback = false; + + BUG_ON(!PageLocked(page)); + + if (!page_has_buffers(page)) + return; + + if (PageWriteback(page)) + *writeback = true; + + head = page_buffers(page); + bh = head; + do { + if (buffer_locked(bh)) + *writeback = true; + + if (buffer_dirty(bh)) + *dirty = true; + + bh = bh->b_this_page; + } while (bh != head); +} +EXPORT_SYMBOL(buffer_check_dirty_writeback); + +/* * Block until a buffer comes unlocked. This doesn't stop it * from becoming locked again - you have to lock it yourself * if you want to preserve its state. @@ -1454,7 +1488,8 @@ static void discard_buffer(struct buffer_head * bh) * block_invalidatepage - invalidate part or all of a buffer-backed page * * @page: the page which is affected - * @offset: the index of the truncation point + * @offset: start of the range to invalidate + * @length: length of the range to invalidate * * block_invalidatepage() is called when all or part of the page has become * invalidated by a truncate operation. @@ -1465,15 +1500,22 @@ static void discard_buffer(struct buffer_head * bh) * point. Because the caller is about to free (and possibly reuse) those * blocks on-disk. */ -void block_invalidatepage(struct page *page, unsigned long offset) +void block_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { struct buffer_head *head, *bh, *next; unsigned int curr_off = 0; + unsigned int stop = length + offset; BUG_ON(!PageLocked(page)); if (!page_has_buffers(page)) goto out; + /* + * Check for overflow + */ + BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); + head = page_buffers(page); bh = head; do { @@ -1481,6 +1523,12 @@ void block_invalidatepage(struct page *page, unsigned long offset) next = bh->b_this_page; /* + * Are we still fully in range ? + */ + if (next_off > stop) + goto out; + + /* * is this block fully invalidated? */ if (offset <= curr_off) @@ -1501,6 +1549,7 @@ out: } EXPORT_SYMBOL(block_invalidatepage); + /* * We attach and possibly dirty the buffers atomically wrt * __set_page_dirty_buffers() via private_lock. try_to_free_buffers @@ -2841,7 +2890,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block, * they may have been added in ext3_writepage(). Make them * freeable here, so the page does not leak. 
*/ - do_invalidatepage(page, 0); + do_invalidatepage(page, 0, PAGE_CACHE_SIZE); unlock_page(page); return 0; /* don't care */ } diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index 746ce532e130..d4c1206af9fc 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -13,8 +13,6 @@ #include <linux/mount.h> #include "internal.h" -#define list_to_page(head) (list_entry((head)->prev, struct page, lru)) - struct cachefiles_lookup_data { struct cachefiles_xattr *auxdata; /* auxiliary data */ char *key; /* key path */ @@ -212,20 +210,29 @@ static void cachefiles_update_object(struct fscache_object *_object) object = container_of(_object, struct cachefiles_object, fscache); cache = container_of(object->fscache.cache, struct cachefiles_cache, cache); + + if (!fscache_use_cookie(_object)) { + _leave(" [relinq]"); + return; + } + cookie = object->fscache.cookie; if (!cookie->def->get_aux) { + fscache_unuse_cookie(_object); _leave(" [no aux]"); return; } auxdata = kmalloc(2 + 512 + 3, cachefiles_gfp); if (!auxdata) { + fscache_unuse_cookie(_object); _leave(" [nomem]"); return; } auxlen = cookie->def->get_aux(cookie->netfs_data, auxdata->data, 511); + fscache_unuse_cookie(_object); ASSERTCMP(auxlen, <, 511); auxdata->len = auxlen + 1; @@ -263,7 +270,7 @@ static void cachefiles_drop_object(struct fscache_object *_object) #endif /* delete retired objects */ - if (object->fscache.state == FSCACHE_OBJECT_RECYCLING && + if (test_bit(FSCACHE_COOKIE_RETIRED, &object->fscache.cookie->flags) && _object != cache->cache.fsdef ) { _debug("- retire object OBJ%x", object->fscache.debug_id); diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index 8c01c5fcdf75..25badd1aec5c 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -38,7 +38,7 @@ void __cachefiles_printk_object(struct cachefiles_object *object, printk(KERN_ERR "%sobject: OBJ%x\n", prefix, object->fscache.debug_id); printk(KERN_ERR "%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n", - prefix, fscache_object_states[object->fscache.state], + prefix, object->fscache.state->name, object->fscache.flags, work_busy(&object->fscache.work), object->fscache.events, object->fscache.event_mask); printk(KERN_ERR "%sops=%u inp=%u exc=%u\n", @@ -127,10 +127,10 @@ static void cachefiles_mark_object_buried(struct cachefiles_cache *cache, found_dentry: kdebug("preemptive burial: OBJ%x [%s] %p", object->fscache.debug_id, - fscache_object_states[object->fscache.state], + object->fscache.state->name, dentry); - if (object->fscache.state < FSCACHE_OBJECT_DYING) { + if (fscache_object_is_live(&object->fscache)) { printk(KERN_ERR "\n"); printk(KERN_ERR "CacheFiles: Error:" " Can't preemptively bury live object\n"); @@ -192,7 +192,7 @@ try_again: /* an old object from a previous incarnation is hogging the slot - we * need to wait for it to be destroyed */ wait_for_old_object: - if (xobject->fscache.state < FSCACHE_OBJECT_DYING) { + if (fscache_object_is_live(&object->fscache)) { printk(KERN_ERR "\n"); printk(KERN_ERR "CacheFiles: Error:" " Unexpected object collision\n"); @@ -836,7 +836,7 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache, // dir->d_name.len, dir->d_name.len, dir->d_name.name, filename); /* look up the victim */ - mutex_lock_nested(&dir->d_inode->i_mutex, 1); + mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); start = jiffies; victim = lookup_one_len(filename, dir, strlen(filename)); diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index 317f9ee9c991..ebaff368120d 
100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -12,6 +12,7 @@ #include <linux/mount.h> #include <linux/slab.h> #include <linux/file.h> +#include <linux/swap.h> #include "internal.h" /* @@ -227,8 +228,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op) */ static int cachefiles_read_backing_file_one(struct cachefiles_object *object, struct fscache_retrieval *op, - struct page *netpage, - struct pagevec *pagevec) + struct page *netpage) { struct cachefiles_one_read *monitor; struct address_space *bmapping; @@ -237,8 +237,6 @@ static int cachefiles_read_backing_file_one(struct cachefiles_object *object, _enter(""); - pagevec_reinit(pagevec); - _debug("read back %p{%lu,%d}", netpage, netpage->index, page_count(netpage)); @@ -283,9 +281,7 @@ installed_new_backing_page: backpage = newpage; newpage = NULL; - page_cache_get(backpage); - pagevec_add(pagevec, backpage); - __pagevec_lru_add_file(pagevec); + lru_cache_add_file(backpage); read_backing_page: ret = bmapping->a_ops->readpage(NULL, backpage); @@ -452,8 +448,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, if (block) { /* submit the apparently valid page to the backing fs to be * read from disk */ - ret = cachefiles_read_backing_file_one(object, op, page, - &pagevec); + ret = cachefiles_read_backing_file_one(object, op, page); } else if (cachefiles_has_space(cache, 0, 1) == 0) { /* there's space in the cache we can use */ fscache_mark_page_cached(op, page); @@ -482,14 +477,11 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, { struct cachefiles_one_read *monitor = NULL; struct address_space *bmapping = object->backer->d_inode->i_mapping; - struct pagevec lru_pvec; struct page *newpage = NULL, *netpage, *_n, *backpage = NULL; int ret = 0; _enter(""); - pagevec_init(&lru_pvec, 0); - list_for_each_entry_safe(netpage, _n, list, lru) { list_del(&netpage->lru); @@ -534,9 +526,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, backpage = newpage; newpage = NULL; - page_cache_get(backpage); - if (!pagevec_add(&lru_pvec, backpage)) - __pagevec_lru_add_file(&lru_pvec); + lru_cache_add_file(backpage); reread_backing_page: ret = bmapping->a_ops->readpage(NULL, backpage); @@ -559,9 +549,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, goto nomem; } - page_cache_get(netpage); - if (!pagevec_add(&lru_pvec, netpage)) - __pagevec_lru_add_file(&lru_pvec); + lru_cache_add_file(netpage); /* install a monitor */ page_cache_get(netpage); @@ -643,9 +631,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, fscache_mark_page_cached(op, netpage); - page_cache_get(netpage); - if (!pagevec_add(&lru_pvec, netpage)) - __pagevec_lru_add_file(&lru_pvec); + lru_cache_add_file(netpage); /* the netpage is unlocked and marked up to date here */ fscache_end_io(op, netpage, 0); @@ -661,8 +647,6 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, out: /* tidy up */ - pagevec_lru_add_file(&lru_pvec); - if (newpage) page_cache_release(newpage); if (netpage) diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c index 73b46288b54b..2476e5162609 100644 --- a/fs/cachefiles/xattr.c +++ b/fs/cachefiles/xattr.c @@ -109,13 +109,12 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object, struct dentry *dentry = object->dentry; int ret; - ASSERT(object->fscache.cookie); ASSERT(dentry); _enter("%p,#%d", object, auxdata->len); /* attempt to install the cache metadata directly */ - 
_debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len); + _debug("SET #%u", auxdata->len); ret = vfs_setxattr(dentry, cachefiles_xattr_cache, &auxdata->type, auxdata->len, @@ -138,13 +137,12 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object, struct dentry *dentry = object->dentry; int ret; - ASSERT(object->fscache.cookie); ASSERT(dentry); _enter("%p,#%d", object, auxdata->len); /* attempt to install the cache metadata directly */ - _debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len); + _debug("SET #%u", auxdata->len); ret = vfs_setxattr(dentry, cachefiles_xattr_cache, &auxdata->type, auxdata->len, diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 3e68ac101040..38b5c1bc6776 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -143,7 +143,8 @@ static int ceph_set_page_dirty(struct page *page) * dirty page counters appropriately. Only called if there is private * data on the page. */ -static void ceph_invalidatepage(struct page *page, unsigned long offset) +static void ceph_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { struct inode *inode; struct ceph_inode_info *ci; @@ -163,20 +164,20 @@ static void ceph_invalidatepage(struct page *page, unsigned long offset) if (!PageDirty(page)) pr_err("%p invalidatepage %p page not dirty\n", inode, page); - if (offset == 0) + if (offset == 0 && length == PAGE_CACHE_SIZE) ClearPageChecked(page); ci = ceph_inode(inode); - if (offset == 0) { - dout("%p invalidatepage %p idx %lu full dirty page %lu\n", - inode, page, page->index, offset); + if (offset == 0 && length == PAGE_CACHE_SIZE) { + dout("%p invalidatepage %p idx %lu full dirty page\n", + inode, page, page->index); ceph_put_wrbuffer_cap_refs(ci, 1, snapc); ceph_put_snap_context(snapc); page->private = 0; ClearPagePrivate(page); } else { - dout("%p invalidatepage %p idx %lu partial dirty page\n", - inode, page, page->index); + dout("%p invalidatepage %p idx %lu partial dirty page %u(%u)\n", + inode, page, page->index, offset, length); } } diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index f02d82b7933e..a40ceda47a32 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -111,11 +111,10 @@ static unsigned fpos_off(loff_t p) * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by * the MDS if/when the directory is modified). */ -static int __dcache_readdir(struct file *filp, - void *dirent, filldir_t filldir) +static int __dcache_readdir(struct file *file, struct dir_context *ctx) { - struct ceph_file_info *fi = filp->private_data; - struct dentry *parent = filp->f_dentry; + struct ceph_file_info *fi = file->private_data; + struct dentry *parent = file->f_dentry; struct inode *dir = parent->d_inode; struct list_head *p; struct dentry *dentry, *last; @@ -126,14 +125,14 @@ static int __dcache_readdir(struct file *filp, last = fi->dentry; fi->dentry = NULL; - dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos, + dout("__dcache_readdir %p at %llu (last %p)\n", dir, ctx->pos, last); spin_lock(&parent->d_lock); /* start at beginning? 
*/ - if (filp->f_pos == 2 || last == NULL || - filp->f_pos < ceph_dentry(last)->offset) { + if (ctx->pos == 2 || last == NULL || + ctx->pos < ceph_dentry(last)->offset) { if (list_empty(&parent->d_subdirs)) goto out_unlock; p = parent->d_subdirs.prev; @@ -157,11 +156,11 @@ more: if (!d_unhashed(dentry) && dentry->d_inode && ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && ceph_ino(dentry->d_inode) != CEPH_INO_CEPH && - filp->f_pos <= di->offset) + ctx->pos <= di->offset) break; dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry, dentry->d_name.len, dentry->d_name.name, di->offset, - filp->f_pos, d_unhashed(dentry) ? " unhashed" : "", + ctx->pos, d_unhashed(dentry) ? " unhashed" : "", !dentry->d_inode ? " null" : ""); spin_unlock(&dentry->d_lock); p = p->prev; @@ -173,29 +172,27 @@ more: spin_unlock(&dentry->d_lock); spin_unlock(&parent->d_lock); - dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos, + dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos, dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); - filp->f_pos = di->offset; - err = filldir(dirent, dentry->d_name.name, - dentry->d_name.len, di->offset, + ctx->pos = di->offset; + if (!dir_emit(ctx, dentry->d_name.name, + dentry->d_name.len, ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino), - dentry->d_inode->i_mode >> 12); - - if (last) { - if (err < 0) { + dentry->d_inode->i_mode >> 12)) { + if (last) { /* remember our position */ fi->dentry = last; fi->next_offset = di->offset; - } else { - dput(last); } + dput(dentry); + return 0; } - last = dentry; - if (err < 0) - goto out; + if (last) + dput(last); + last = dentry; - filp->f_pos++; + ctx->pos++; /* make sure a dentry wasn't dropped while we didn't have parent lock */ if (!ceph_dir_is_complete(dir)) { @@ -235,59 +232,59 @@ static int note_last_dentry(struct ceph_file_info *fi, const char *name, return 0; } -static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int ceph_readdir(struct file *file, struct dir_context *ctx) { - struct ceph_file_info *fi = filp->private_data; - struct inode *inode = file_inode(filp); + struct ceph_file_info *fi = file->private_data; + struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_fs_client *fsc = ceph_inode_to_client(inode); struct ceph_mds_client *mdsc = fsc->mdsc; - unsigned frag = fpos_frag(filp->f_pos); - int off = fpos_off(filp->f_pos); + unsigned frag = fpos_frag(ctx->pos); + int off = fpos_off(ctx->pos); int err; u32 ftype; struct ceph_mds_reply_info_parsed *rinfo; const int max_entries = fsc->mount_options->max_readdir; const int max_bytes = fsc->mount_options->max_readdir_bytes; - dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off); + dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off); if (fi->flags & CEPH_F_ATEND) return 0; /* always start with . and .. 
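/*
 * Aside: a sketch of the readdir -> iterate conversion applied to ceph here
 * (and to cifs further below): the filldir callback and opaque dirent cookie
 * become a struct dir_context, the position lives in ctx->pos, and
 * dir_emit() returns false once the caller's buffer is full. The "." and
 * ".." emission that ceph_readdir open-codes just below is what
 * dir_emit_dots() wraps. Hypothetical minimal filesystem:
 */
static int example_iterate(struct file *file, struct dir_context *ctx)
{
	if (!dir_emit_dots(file, ctx))	/* emits "." and "..", advances ctx->pos */
		return 0;
	if (ctx->pos == 2) {
		if (!dir_emit(ctx, "hello", 5, 42 /* ino */, DT_REG))
			return 0;	/* buffer full: stop, resume here later */
		ctx->pos++;
	}
	return 0;
}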
*/ - if (filp->f_pos == 0) { + if (ctx->pos == 0) { /* note dir version at start of readdir so we can tell * if any dentries get dropped */ fi->dir_release_count = atomic_read(&ci->i_release_count); dout("readdir off 0 -> '.'\n"); - if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0), + if (!dir_emit(ctx, ".", 1, ceph_translate_ino(inode->i_sb, inode->i_ino), - inode->i_mode >> 12) < 0) + inode->i_mode >> 12)) return 0; - filp->f_pos = 1; + ctx->pos = 1; off = 1; } - if (filp->f_pos == 1) { - ino_t ino = parent_ino(filp->f_dentry); + if (ctx->pos == 1) { + ino_t ino = parent_ino(file->f_dentry); dout("readdir off 1 -> '..'\n"); - if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1), + if (!dir_emit(ctx, "..", 2, ceph_translate_ino(inode->i_sb, ino), - inode->i_mode >> 12) < 0) + inode->i_mode >> 12)) return 0; - filp->f_pos = 2; + ctx->pos = 2; off = 2; } /* can we use the dcache? */ spin_lock(&ci->i_ceph_lock); - if ((filp->f_pos == 2 || fi->dentry) && + if ((ctx->pos == 2 || fi->dentry) && !ceph_test_mount_opt(fsc, NOASYNCREADDIR) && ceph_snap(inode) != CEPH_SNAPDIR && __ceph_dir_is_complete(ci) && __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) { spin_unlock(&ci->i_ceph_lock); - err = __dcache_readdir(filp, dirent, filldir); + err = __dcache_readdir(file, ctx); if (err != -EAGAIN) return err; } else { @@ -327,7 +324,7 @@ more: return PTR_ERR(req); req->r_inode = inode; ihold(inode); - req->r_dentry = dget(filp->f_dentry); + req->r_dentry = dget(file->f_dentry); /* hints to request -> mds selection code */ req->r_direct_mode = USE_AUTH_MDS; req->r_direct_hash = ceph_frag_value(frag); @@ -379,15 +376,16 @@ more: rinfo = &fi->last_readdir->r_reply_info; dout("readdir frag %x num %d off %d chunkoff %d\n", frag, rinfo->dir_nr, off, fi->offset); + + ctx->pos = ceph_make_fpos(frag, off); while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) { - u64 pos = ceph_make_fpos(frag, off); struct ceph_mds_reply_inode *in = rinfo->dir_in[off - fi->offset].in; struct ceph_vino vino; ino_t ino; dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n", - off, off - fi->offset, rinfo->dir_nr, pos, + off, off - fi->offset, rinfo->dir_nr, ctx->pos, rinfo->dir_dname_len[off - fi->offset], rinfo->dir_dname[off - fi->offset], in); BUG_ON(!in); @@ -395,16 +393,15 @@ more: vino.ino = le64_to_cpu(in->ino); vino.snap = le64_to_cpu(in->snapid); ino = ceph_vino_to_ino(vino); - if (filldir(dirent, + if (!dir_emit(ctx, rinfo->dir_dname[off - fi->offset], rinfo->dir_dname_len[off - fi->offset], - pos, - ceph_translate_ino(inode->i_sb, ino), ftype) < 0) { + ceph_translate_ino(inode->i_sb, ino), ftype)) { dout("filldir stopping us...\n"); return 0; } off++; - filp->f_pos = pos + 1; + ctx->pos++; } if (fi->last_name) { @@ -417,7 +414,7 @@ more: if (!ceph_frag_is_rightmost(frag)) { frag = ceph_frag_next(frag); off = 0; - filp->f_pos = ceph_make_fpos(frag, off); + ctx->pos = ceph_make_fpos(frag, off); dout("readdir next frag is %x\n", frag); goto more; } @@ -432,11 +429,11 @@ more: if (atomic_read(&ci->i_release_count) == fi->dir_release_count) { dout(" marking %p complete\n", inode); __ceph_dir_set_complete(ci, fi->dir_release_count); - ci->i_max_offset = filp->f_pos; + ci->i_max_offset = ctx->pos; } spin_unlock(&ci->i_ceph_lock); - dout("readdir %p filp %p done.\n", inode, filp); + dout("readdir %p file %p done.\n", inode, file); return 0; } @@ -1268,7 +1265,7 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn) const struct file_operations ceph_dir_fops = { .read = ceph_read_dir, - .readdir = ceph_readdir, + 
.iterate = ceph_readdir, .llseek = ceph_dir_llseek, .open = ceph_open, .release = ceph_release, diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 656e16907430..16c989d3e23c 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -866,16 +866,7 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence) break; } - if (offset < 0 || offset > inode->i_sb->s_maxbytes) { - offset = -EINVAL; - goto out; - } - - /* Special lock needed here? */ - if (offset != file->f_pos) { - file->f_pos = offset; - file->f_version = 0; - } + offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); out: mutex_unlock(&inode->i_mutex); diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index 202dd3d68be0..690f73f42425 100644 --- a/fs/ceph/locks.c +++ b/fs/ceph/locks.c @@ -191,27 +191,23 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count) } /** - * Encode the flock and fcntl locks for the given inode into the pagelist. - * Format is: #fcntl locks, sequential fcntl locks, #flock locks, - * sequential flock locks. - * Must be called with lock_flocks() already held. - * If we encounter more of a specific lock type than expected, - * we return the value 1. + * Encode the flock and fcntl locks for the given inode into the ceph_filelock + * array. Must be called with inode->i_lock already held. + * If we encounter more of a specific lock type than expected, return -ENOSPC. */ -int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, - int num_fcntl_locks, int num_flock_locks) +int ceph_encode_locks_to_buffer(struct inode *inode, + struct ceph_filelock *flocks, + int num_fcntl_locks, int num_flock_locks) { struct file_lock *lock; - struct ceph_filelock cephlock; int err = 0; int seen_fcntl = 0; int seen_flock = 0; + int l = 0; dout("encoding %d flock and %d fcntl locks", num_flock_locks, num_fcntl_locks); - err = ceph_pagelist_append(pagelist, &num_fcntl_locks, sizeof(u32)); - if (err) - goto fail; + for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { if (lock->fl_flags & FL_POSIX) { ++seen_fcntl; @@ -219,19 +215,12 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, err = -ENOSPC; goto fail; } - err = lock_to_ceph_filelock(lock, &cephlock); + err = lock_to_ceph_filelock(lock, &flocks[l]); if (err) goto fail; - err = ceph_pagelist_append(pagelist, &cephlock, - sizeof(struct ceph_filelock)); + ++l; } - if (err) - goto fail; } - - err = ceph_pagelist_append(pagelist, &num_flock_locks, sizeof(u32)); - if (err) - goto fail; for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { if (lock->fl_flags & FL_FLOCK) { ++seen_flock; @@ -239,19 +228,51 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, err = -ENOSPC; goto fail; } - err = lock_to_ceph_filelock(lock, &cephlock); + err = lock_to_ceph_filelock(lock, &flocks[l]); if (err) goto fail; - err = ceph_pagelist_append(pagelist, &cephlock, - sizeof(struct ceph_filelock)); + ++l; } - if (err) - goto fail; } fail: return err; } +/** + * Copy the encoded flock and fcntl locks into the pagelist. + * Format is: #fcntl locks, sequential fcntl locks, #flock locks, + * sequential flock locks. + * Returns zero on success. 
+ */ +int ceph_locks_to_pagelist(struct ceph_filelock *flocks, + struct ceph_pagelist *pagelist, + int num_fcntl_locks, int num_flock_locks) +{ + int err = 0; + __le32 nlocks; + + nlocks = cpu_to_le32(num_fcntl_locks); + err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks)); + if (err) + goto out_fail; + + err = ceph_pagelist_append(pagelist, flocks, + num_fcntl_locks * sizeof(*flocks)); + if (err) + goto out_fail; + + nlocks = cpu_to_le32(num_flock_locks); + err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks)); + if (err) + goto out_fail; + + err = ceph_pagelist_append(pagelist, + &flocks[num_fcntl_locks], + num_flock_locks * sizeof(*flocks)); +out_fail: + return err; +} + /* * Given a pointer to a lock, convert it to a ceph filelock */ diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 4f22671a5bd4..74fd2898b2ab 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2478,39 +2478,44 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, if (recon_state->flock) { int num_fcntl_locks, num_flock_locks; - struct ceph_pagelist_cursor trunc_point; - - ceph_pagelist_set_cursor(pagelist, &trunc_point); - do { - lock_flocks(); - ceph_count_locks(inode, &num_fcntl_locks, - &num_flock_locks); - rec.v2.flock_len = (2*sizeof(u32) + - (num_fcntl_locks+num_flock_locks) * - sizeof(struct ceph_filelock)); - unlock_flocks(); - - /* pre-alloc pagelist */ - ceph_pagelist_truncate(pagelist, &trunc_point); - err = ceph_pagelist_append(pagelist, &rec, reclen); - if (!err) - err = ceph_pagelist_reserve(pagelist, - rec.v2.flock_len); - - /* encode locks */ - if (!err) { - lock_flocks(); - err = ceph_encode_locks(inode, - pagelist, - num_fcntl_locks, - num_flock_locks); - unlock_flocks(); - } - } while (err == -ENOSPC); + struct ceph_filelock *flocks; + +encode_again: + spin_lock(&inode->i_lock); + ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); + spin_unlock(&inode->i_lock); + flocks = kmalloc((num_fcntl_locks+num_flock_locks) * + sizeof(struct ceph_filelock), GFP_NOFS); + if (!flocks) { + err = -ENOMEM; + goto out_free; + } + spin_lock(&inode->i_lock); + err = ceph_encode_locks_to_buffer(inode, flocks, + num_fcntl_locks, + num_flock_locks); + spin_unlock(&inode->i_lock); + if (err) { + kfree(flocks); + if (err == -ENOSPC) + goto encode_again; + goto out_free; + } + /* + * number of encoded locks is stable, so copy to pagelist + */ + rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) + + (num_fcntl_locks+num_flock_locks) * + sizeof(struct ceph_filelock)); + err = ceph_pagelist_append(pagelist, &rec, reclen); + if (!err) + err = ceph_locks_to_pagelist(flocks, pagelist, + num_fcntl_locks, + num_flock_locks); + kfree(flocks); } else { err = ceph_pagelist_append(pagelist, &rec, reclen); } - out_free: kfree(path); out_dput: diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 8696be2ff679..7ccfdb4aea2e 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -822,8 +822,13 @@ extern const struct export_operations ceph_export_ops; extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl); extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl); extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num); -extern int ceph_encode_locks(struct inode *i, struct ceph_pagelist *p, - int p_locks, int f_locks); +extern int ceph_encode_locks_to_buffer(struct inode *inode, + struct ceph_filelock *flocks, + int num_fcntl_locks, + int num_flock_locks); +extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks, + struct 
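/*
 * Aside: the encode_caps_cb() rework above (fs/ceph/mds_client.c) is a
 * standard two-phase snapshot of a list that may only be walked under a
 * spinlock: count under the lock, allocate with the lock dropped, re-take
 * the lock to fill the buffer, and restart if the list grew in between.
 * Schematic outline with hypothetical count_items()/fill_items() helpers:
 */
again:
	spin_lock(&inode->i_lock);
	n = count_items(inode);
	spin_unlock(&inode->i_lock);

	buf = kmalloc(n * sizeof(*buf), GFP_NOFS);	/* may sleep: lock dropped */
	if (!buf)
		return -ENOMEM;

	spin_lock(&inode->i_lock);
	err = fill_items(inode, buf, n);		/* -ENOSPC if list grew */
	spin_unlock(&inode->i_lock);
	if (err == -ENOSPC) {
		kfree(buf);
		goto again;
	}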
ceph_pagelist *pagelist, + int num_fcntl_locks, int num_flock_locks); extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c); /* debugfs.c */ diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index 2906ee276408..603f18a65c12 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig @@ -10,6 +10,7 @@ config CIFS select CRYPTO_ECB select CRYPTO_DES select CRYPTO_SHA256 + select CRYPTO_CMAC help This is the client VFS module for the Common Internet File System (CIFS) protocol which is the successor to the Server Message Block diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index d59748346020..f3ac4154cbb6 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -213,7 +213,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) tcon->nativeFileSystem); } seq_printf(m, "DevInfo: 0x%x Attributes: 0x%x" - "\nPathComponentMax: %d Status: 0x%d", + "\n\tPathComponentMax: %d Status: 0x%d", le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics), le32_to_cpu(tcon->fsAttrInfo.Attributes), le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength), @@ -224,6 +224,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) seq_puts(m, " type: CDROM "); else seq_printf(m, " type: %d ", dev_type); + if (server->ops->dump_share_caps) + server->ops->dump_share_caps(m, tcon); if (tcon->need_reconnect) seq_puts(m, "\tDISCONNECTED "); @@ -595,9 +597,36 @@ static int cifs_security_flags_proc_open(struct inode *inode, struct file *file) return single_open(file, cifs_security_flags_proc_show, NULL); } +/* + * Ensure that if someone sets a MUST flag, that we disable all other MAY + * flags except for the ones corresponding to the given MUST flag. If there are + * multiple MUST flags, then try to prefer more secure ones. + */ +static void +cifs_security_flags_handle_must_flags(unsigned int *flags) +{ + unsigned int signflags = *flags & CIFSSEC_MUST_SIGN; + + if ((*flags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) + *flags = CIFSSEC_MUST_KRB5; + else if ((*flags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP) + *flags = CIFSSEC_MUST_NTLMSSP; + else if ((*flags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2) + *flags = CIFSSEC_MUST_NTLMV2; + else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM) + *flags = CIFSSEC_MUST_NTLM; + else if ((*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN) + *flags = CIFSSEC_MUST_LANMAN; + else if ((*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT) + *flags = CIFSSEC_MUST_PLNTXT; + + *flags |= signflags; +} + static ssize_t cifs_security_flags_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { + int rc; unsigned int flags; char flags_string[12]; char c; @@ -620,26 +649,35 @@ static ssize_t cifs_security_flags_proc_write(struct file *file, global_secflags = CIFSSEC_MAX; return count; } else if (!isdigit(c)) { - cifs_dbg(VFS, "invalid flag %c\n", c); + cifs_dbg(VFS, "Invalid SecurityFlags: %s\n", + flags_string); return -EINVAL; } } - /* else we have a number */ - flags = simple_strtoul(flags_string, NULL, 0); + /* else we have a number */ + rc = kstrtouint(flags_string, 0, &flags); + if (rc) { + cifs_dbg(VFS, "Invalid SecurityFlags: %s\n", + flags_string); + return rc; + } cifs_dbg(FYI, "sec flags 0x%x\n", flags); - if (flags <= 0) { - cifs_dbg(VFS, "invalid security flags %s\n", flags_string); + if (flags == 0) { + cifs_dbg(VFS, "Invalid SecurityFlags: %s\n", flags_string); return -EINVAL; } if (flags & ~CIFSSEC_MASK) { - cifs_dbg(VFS, "attempt to set unsupported security flags 0x%x\n", + 
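/*
 * Aside on the SecurityFlags proc handler change above: simple_strtoul()
 * silently ignores trailing garbage and cannot report overflow, whereas
 * kstrtouint() rejects malformed input with an errno, which is what the
 * write path now relies on. Usage pattern:
 */
unsigned int flags;
int rc;

rc = kstrtouint(flags_string, 0, &flags);	/* base 0: accepts 0x/0 prefixes */
if (rc)
	return rc;				/* -EINVAL or -ERANGE */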
cifs_dbg(VFS, "Unsupported security flags: 0x%x\n", flags & ~CIFSSEC_MASK); return -EINVAL; } + + cifs_security_flags_handle_must_flags(&flags); + /* flags look ok - update the global security flags for cifs module */ global_secflags = flags; if (global_secflags & CIFSSEC_MUST_SIGN) { diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h index 4fb097468e21..fe8d6276410a 100644 --- a/fs/cifs/cifs_unicode.h +++ b/fs/cifs/cifs_unicode.h @@ -327,14 +327,14 @@ UniToupper(register wchar_t uc) /* * UniStrupr: Upper case a unicode string */ -static inline wchar_t * -UniStrupr(register wchar_t *upin) +static inline __le16 * +UniStrupr(register __le16 *upin) { - register wchar_t *up; + register __le16 *up; up = upin; while (*up) { /* For all characters */ - *up = UniToupper(*up); + *up = cpu_to_le16(UniToupper(le16_to_cpu(*up))); up++; } return upin; /* Return input pointer */ diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 71436d1fca13..3d8bf941d126 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -276,7 +276,6 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt, strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE); if (!encrypt && global_secflags & CIFSSEC_MAY_PLNTXT) { - memset(lnm_session_key, 0, CIFS_SESS_KEY_SIZE); memcpy(lnm_session_key, password_with_pad, CIFS_ENCPWD_SIZE); return 0; @@ -414,7 +413,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, int rc = 0; int len; char nt_hash[CIFS_NTHASH_SIZE]; - wchar_t *user; + __le16 *user; wchar_t *domain; wchar_t *server; @@ -439,7 +438,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, return rc; } - /* convert ses->user_name to unicode and uppercase */ + /* convert ses->user_name to unicode */ len = ses->user_name ? 
strlen(ses->user_name) : 0; user = kmalloc(2 + (len * 2), GFP_KERNEL); if (user == NULL) { @@ -448,7 +447,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, } if (len) { - len = cifs_strtoUTF16((__le16 *)user, ses->user_name, len, nls_cp); + len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp); UniStrupr(user); } else { memset(user, '\0', 2); @@ -536,7 +535,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash) return rc; } - if (ses->server->secType == RawNTLMSSP) + if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) memcpy(ses->auth_key.response + offset, ses->ntlmssp->cryptkey, CIFS_SERVER_CHALLENGE_SIZE); else @@ -568,7 +567,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) char ntlmv2_hash[16]; unsigned char *tiblob = NULL; /* target info blob */ - if (ses->server->secType == RawNTLMSSP) { + if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) { if (!ses->domainName) { rc = find_domain_name(ses, nls_cp); if (rc) { @@ -706,6 +705,9 @@ calc_seckey(struct cifs_ses *ses) void cifs_crypto_shash_release(struct TCP_Server_Info *server) { + if (server->secmech.cmacaes) + crypto_free_shash(server->secmech.cmacaes); + if (server->secmech.hmacsha256) crypto_free_shash(server->secmech.hmacsha256); @@ -715,6 +717,8 @@ cifs_crypto_shash_release(struct TCP_Server_Info *server) if (server->secmech.hmacmd5) crypto_free_shash(server->secmech.hmacmd5); + kfree(server->secmech.sdesccmacaes); + kfree(server->secmech.sdeschmacsha256); kfree(server->secmech.sdeschmacmd5); @@ -748,6 +752,13 @@ cifs_crypto_shash_allocate(struct TCP_Server_Info *server) goto crypto_allocate_hmacsha256_fail; } + server->secmech.cmacaes = crypto_alloc_shash("cmac(aes)", 0, 0); + if (IS_ERR(server->secmech.cmacaes)) { + cifs_dbg(VFS, "could not allocate crypto cmac-aes"); + rc = PTR_ERR(server->secmech.cmacaes); + goto crypto_allocate_cmacaes_fail; + } + size = sizeof(struct shash_desc) + crypto_shash_descsize(server->secmech.hmacmd5); server->secmech.sdeschmacmd5 = kmalloc(size, GFP_KERNEL); @@ -778,8 +789,22 @@ cifs_crypto_shash_allocate(struct TCP_Server_Info *server) server->secmech.sdeschmacsha256->shash.tfm = server->secmech.hmacsha256; server->secmech.sdeschmacsha256->shash.flags = 0x0; + size = sizeof(struct shash_desc) + + crypto_shash_descsize(server->secmech.cmacaes); + server->secmech.sdesccmacaes = kmalloc(size, GFP_KERNEL); + if (!server->secmech.sdesccmacaes) { + cifs_dbg(VFS, "%s: Can't alloc cmacaes\n", __func__); + rc = -ENOMEM; + goto crypto_allocate_cmacaes_sdesc_fail; + } + server->secmech.sdesccmacaes->shash.tfm = server->secmech.cmacaes; + server->secmech.sdesccmacaes->shash.flags = 0x0; + return 0; +crypto_allocate_cmacaes_sdesc_fail: + kfree(server->secmech.sdeschmacsha256); + crypto_allocate_hmacsha256_sdesc_fail: kfree(server->secmech.sdescmd5); @@ -787,6 +812,9 @@ crypto_allocate_md5_sdesc_fail: kfree(server->secmech.sdeschmacmd5); crypto_allocate_hmacmd5_sdesc_fail: + crypto_free_shash(server->secmech.cmacaes); + +crypto_allocate_cmacaes_fail: crypto_free_shash(server->secmech.hmacsha256); crypto_allocate_hmacsha256_fail: diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 3752b9f6d9e4..4bdd547dbf6f 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -312,11 +312,14 @@ cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server) } static void -cifs_show_security(struct seq_file *s, struct TCP_Server_Info *server) +cifs_show_security(struct seq_file *s, struct cifs_ses *ses) { + if (ses->sectype == Unspecified) + 
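/*
 * Aside: a sketch of the shash pattern behind the new cmac(aes) transform
 * above (used for SMB3 packet signing): allocate the tfm once, size a
 * descriptor for it, then setkey/init/update/final per message. Error
 * checking and the SMB3 signing-key derivation are elided; struct sdesc
 * mirrors the cifs-private wrapper:
 */
struct sdesc { struct shash_desc shash; char ctx[]; };

struct crypto_shash *tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
struct sdesc *sdesc = kmalloc(sizeof(*sdesc) + crypto_shash_descsize(tfm),
			      GFP_KERNEL);
sdesc->shash.tfm = tfm;
sdesc->shash.flags = 0;

crypto_shash_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);	/* 16-byte signing key */
crypto_shash_init(&sdesc->shash);
crypto_shash_update(&sdesc->shash, buf, len);
crypto_shash_final(&sdesc->shash, sig);			/* 16-byte CMAC out */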
return; + seq_printf(s, ",sec="); - switch (server->secType) { + switch (ses->sectype) { case LANMAN: seq_printf(s, "lanman"); break; @@ -338,7 +341,7 @@ cifs_show_security(struct seq_file *s, struct TCP_Server_Info *server) break; } - if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) + if (ses->sign) seq_printf(s, "i"); } @@ -369,7 +372,7 @@ cifs_show_options(struct seq_file *s, struct dentry *root) srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr; seq_printf(s, ",vers=%s", tcon->ses->server->vals->version_string); - cifs_show_security(s, tcon->ses->server); + cifs_show_security(s, tcon->ses); cifs_show_cache_flavor(s, cifs_sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) @@ -765,7 +768,7 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int whence) static int cifs_setlease(struct file *file, long arg, struct file_lock **lease) { - /* note that this is called by vfs setlease with lock_flocks held + /* note that this is called by vfs setlease with i_lock held to protect *lease from going away */ struct inode *inode = file_inode(file); struct cifsFileInfo *cfile = file->private_data; @@ -968,7 +971,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = { }; const struct file_operations cifs_dir_ops = { - .readdir = cifs_readdir, + .iterate = cifs_readdir, .release = cifs_closedir, .read = generic_read_dir, .unlocked_ioctl = cifs_ioctl, diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 0e32c3446ce9..ea723a5e8226 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -101,7 +101,7 @@ extern int cifs_file_mmap(struct file * , struct vm_area_struct *); extern int cifs_file_strict_mmap(struct file * , struct vm_area_struct *); extern const struct file_operations cifs_dir_ops; extern int cifs_dir_open(struct inode *inode, struct file *file); -extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir); +extern int cifs_readdir(struct file *file, struct dir_context *ctx); /* Functions related to dir entries */ extern const struct dentry_operations cifs_dentry_ops; @@ -132,5 +132,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ -#define CIFS_VERSION "2.0" +#define CIFS_VERSION "2.01" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 4f07f6fbe494..e66b08882548 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -101,20 +101,14 @@ enum statusEnum { }; enum securityEnum { - LANMAN = 0, /* Legacy LANMAN auth */ + Unspecified = 0, /* not specified */ + LANMAN, /* Legacy LANMAN auth */ NTLM, /* Legacy NTLM012 auth with NTLM hash */ NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */ RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */ -/* NTLMSSP, */ /* can use rawNTLMSSP instead of NTLMSSP via SPNEGO */ Kerberos, /* Kerberos via SPNEGO */ }; -enum protocolEnum { - TCP = 0, - SCTP - /* Netbios frames protocol not supported at this time */ -}; - struct session_key { unsigned int len; char *response; @@ -131,9 +125,11 @@ struct cifs_secmech { struct crypto_shash *hmacmd5; /* hmac-md5 hash function */ struct crypto_shash *md5; /* md5 hash function */ struct crypto_shash *hmacsha256; /* hmac-sha256 hash function */ + struct crypto_shash *cmacaes; /* block-cipher based MAC function */ struct sdesc *sdeschmacmd5; /* ctxt to generate ntlmv2 hash, CR1 */ struct sdesc *sdescmd5; /* ctxt to generate cifs/smb signature */ struct sdesc *sdeschmacsha256; /* ctxt to generate 
smb2 signature */ + struct sdesc *sdesccmacaes; /* ctxt to generate smb3 signature */ }; /* per smb session structure/fields */ @@ -181,6 +177,7 @@ enum smb_version { Smb_20, Smb_21, Smb_30, + Smb_302, }; struct mid_q_entry; @@ -228,6 +225,7 @@ struct smb_version_operations { void (*dump_detail)(void *); void (*clear_stats)(struct cifs_tcon *); void (*print_stats)(struct seq_file *m, struct cifs_tcon *); + void (*dump_share_caps)(struct seq_file *, struct cifs_tcon *); /* verify the message */ int (*check_message)(char *, unsigned int); bool (*is_oplock_break)(char *, struct TCP_Server_Info *); @@ -367,6 +365,8 @@ struct smb_version_operations { void (*set_lease_key)(struct inode *, struct cifs_fid *fid); /* generate new lease key */ void (*new_lease_key)(struct cifs_fid *fid); + /* The next two functions will need to be changed to per smb session */ + void (*generate_signingkey)(struct TCP_Server_Info *server); int (*calc_signature)(struct smb_rqst *rqst, struct TCP_Server_Info *server); }; @@ -387,6 +387,8 @@ struct smb_version_values { unsigned int cap_nt_find; unsigned int cap_large_files; unsigned int oplock_read; + __u16 signing_enabled; + __u16 signing_required; }; #define HEADER_SIZE(server) (server->vals->header_size) @@ -407,7 +409,8 @@ struct smb_vol { kgid_t backupgid; umode_t file_mode; umode_t dir_mode; - unsigned secFlg; + enum securityEnum sectype; /* sectype requested via mnt opts */ + bool sign; /* was signing requested via mnt opts? */ bool retry:1; bool intr:1; bool setuids:1; @@ -441,6 +444,7 @@ struct smb_vol { bool mfsymlinks:1; /* use Minshall+French Symlinks */ bool multiuser:1; bool rwpidforward:1; /* pid forward for read/write operations */ + bool nosharesock; unsigned int rsize; unsigned int wsize; bool sockopt_tcp_nodelay:1; @@ -514,6 +518,7 @@ struct TCP_Server_Info { struct task_struct *tsk; char server_GUID[16]; __u16 sec_mode; + bool sign; /* is signing enabled on this connection? 
*/ bool session_estab; /* mark when very first sess is established */ #ifdef CONFIG_CIFS_SMB2 int echo_credits; /* echo reserved slots */ @@ -521,7 +526,6 @@ struct TCP_Server_Info { bool echoes:1; /* enable echoes */ #endif u16 dialect; /* dialect index that server chose */ - enum securityEnum secType; bool oplocks:1; /* enable oplocks */ unsigned int maxReq; /* Clients should submit no more */ /* than maxReq distinct unanswered SMBs to the server when using */ @@ -540,12 +544,17 @@ struct TCP_Server_Info { int timeAdj; /* Adjust for difference in server time zone in sec */ __u64 CurrentMid; /* multiplex id - rotating counter */ char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */ + char smb3signingkey[SMB3_SIGN_KEY_SIZE]; /* for signing smb3 packets */ /* 16th byte of RFC1001 workstation name is always null */ char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; __u32 sequence_number; /* for signing, protected by srv_mutex */ struct session_key session_key; unsigned long lstrp; /* when we got last response from this server */ struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */ +#define CIFS_NEGFLAVOR_LANMAN 0 /* wct == 13, LANMAN */ +#define CIFS_NEGFLAVOR_UNENCAP 1 /* wct == 17, but no ext_sec */ +#define CIFS_NEGFLAVOR_EXTENDED 2 /* wct == 17, ext_sec bit set */ + char negflavor; /* NEGOTIATE response flavor */ /* extended security flavors that server supports */ bool sec_ntlmssp; /* supports NTLMSSP */ bool sec_kerberosu2u; /* supports U2U Kerberos */ @@ -697,7 +706,6 @@ struct cifs_ses { enum statusEnum status; unsigned overrideSecFlg; /* if non-zero override global sec flags */ __u16 ipc_tid; /* special tid for connection to IPC share */ - __u16 flags; __u16 vcnum; char *serverOS; /* name of operating system underlying server */ char *serverNOS; /* name of network operating system of server */ @@ -714,21 +722,14 @@ struct cifs_ses { char *password; struct session_key auth_key; struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */ + enum securityEnum sectype; /* what security flavor was specified? */ + bool sign; /* is signing required? */ bool need_reconnect:1; /* connection reset, uid now invalid */ #ifdef CONFIG_CIFS_SMB2 __u16 session_flags; #endif /* CONFIG_CIFS_SMB2 */ }; -/* no more than one of the following three session flags may be set */ -#define CIFS_SES_NT4 1 -#define CIFS_SES_OS2 2 -#define CIFS_SES_W9X 4 -/* following flag is set for old servers such as OS2 (and Win95?) 
- which do not negotiate NTLM or POSIX dialects, but instead - negotiate one of the older LANMAN dialects */ -#define CIFS_SES_LANMAN 8 - static inline bool cap_unix(struct cifs_ses *ses) { @@ -816,7 +817,7 @@ struct cifs_tcon { #ifdef CONFIG_CIFS_SMB2 bool print:1; /* set if connection to printer share */ bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */ - __u32 capabilities; + __le32 capabilities; __u32 share_flags; __u32 maximal_access; __u32 vol_serial_number; @@ -1348,7 +1349,7 @@ require use of the stronger protocol */ #define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */ #define CIFSSEC_MUST_NTLMSSP 0x80080 /* raw ntlmssp with ntlmv2 */ -#define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMSSP) +#define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP) #define CIFSSEC_MAX (CIFSSEC_MUST_SIGN | CIFSSEC_MUST_NTLMV2) #define CIFSSEC_AUTH_MASK (CIFSSEC_MAY_NTLM | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_LANMAN | CIFSSEC_MAY_PLNTXT | CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP) /* @@ -1494,4 +1495,7 @@ extern struct smb_version_values smb21_values; #define SMB30_VERSION_STRING "3.0" extern struct smb_version_operations smb30_operations; extern struct smb_version_values smb30_values; +#define SMB302_VERSION_STRING "3.02" +/*extern struct smb_version_operations smb302_operations;*/ /* not needed yet */ +extern struct smb_version_values smb302_values; #endif /* _CIFS_GLOB_H */ diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index e996ff6b26d1..11ca24a8e054 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h @@ -142,6 +142,11 @@ */ #define CIFS_SESS_KEY_SIZE (16) +/* + * Size of the smb3 signing key + */ +#define SMB3_SIGN_KEY_SIZE (16) + #define CIFS_CLIENT_CHALLENGE_SIZE (8) #define CIFS_SERVER_CHALLENGE_SIZE (8) #define CIFS_HMAC_MD5_HASH_SIZE (16) @@ -531,7 +536,7 @@ typedef struct lanman_neg_rsp { #define READ_RAW_ENABLE 1 #define WRITE_RAW_ENABLE 2 #define RAW_ENABLE (READ_RAW_ENABLE | WRITE_RAW_ENABLE) - +#define SMB1_CLIENT_GUID_SIZE (16) typedef struct negotiate_rsp { struct smb_hdr hdr; /* wct = 17 */ __le16 DialectIndex; /* 0xFFFF = no dialect acceptable */ @@ -553,7 +558,7 @@ typedef struct negotiate_rsp { /* followed by 16 bytes of server GUID */ /* then security blob if cap_extended_security negotiated */ struct { - unsigned char GUID[16]; + unsigned char GUID[SMB1_CLIENT_GUID_SIZE]; unsigned char SecurityBlob[1]; } __attribute__((packed)) extended_response; } __attribute__((packed)) u; @@ -1315,6 +1320,14 @@ typedef struct smb_com_ntransact_rsp { /* parms and data follow */ } __attribute__((packed)) NTRANSACT_RSP; +/* See MS-SMB 2.2.7.2.1.1 */ +struct srv_copychunk { + __le64 SourceOffset; + __le64 DestinationOffset; + __le32 CopyLength; + __u32 Reserved; +} __packed; + typedef struct smb_com_transaction_ioctl_req { struct smb_hdr hdr; /* wct = 23 */ __u8 MaxSetupCount; diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index dda188a94332..c8ff018fae68 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -118,6 +118,8 @@ extern void header_assemble(struct smb_hdr *, char /* command */ , extern int small_smb_init_no_tc(const int smb_cmd, const int wct, struct cifs_ses *ses, void **request_buf); +extern enum securityEnum select_sectype(struct TCP_Server_Info *server, + enum securityEnum requested); extern int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *nls_cp); extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); @@ -212,6 +214,7 @@ extern int 
cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses); extern int cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, struct nls_table *nls_info); +extern int cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required); extern int CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses); extern int CIFSTCon(const unsigned int xid, struct cifs_ses *ses, @@ -433,6 +436,7 @@ extern int setup_ntlmv2_rsp(struct cifs_ses *, const struct nls_table *); extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *); extern void cifs_crypto_shash_release(struct TCP_Server_Info *); extern int calc_seckey(struct cifs_ses *); +extern void generate_smb3signingkey(struct TCP_Server_Info *); #ifdef CONFIG_CIFS_WEAK_PW_HASH extern int calc_lanman_hash(const char *password, const char *cryptkey, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index a58dc77cc443..a89c4cb4e6cf 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -367,6 +367,185 @@ vt2_err: return -EINVAL; } +static int +decode_ext_sec_blob(struct cifs_ses *ses, NEGOTIATE_RSP *pSMBr) +{ + int rc = 0; + u16 count; + char *guid = pSMBr->u.extended_response.GUID; + struct TCP_Server_Info *server = ses->server; + + count = get_bcc(&pSMBr->hdr); + if (count < SMB1_CLIENT_GUID_SIZE) + return -EIO; + + spin_lock(&cifs_tcp_ses_lock); + if (server->srv_count > 1) { + spin_unlock(&cifs_tcp_ses_lock); + if (memcmp(server->server_GUID, guid, SMB1_CLIENT_GUID_SIZE) != 0) { + cifs_dbg(FYI, "server UID changed\n"); + memcpy(server->server_GUID, guid, SMB1_CLIENT_GUID_SIZE); + } + } else { + spin_unlock(&cifs_tcp_ses_lock); + memcpy(server->server_GUID, guid, SMB1_CLIENT_GUID_SIZE); + } + + if (count == SMB1_CLIENT_GUID_SIZE) { + server->sec_ntlmssp = true; + } else { + count -= SMB1_CLIENT_GUID_SIZE; + rc = decode_negTokenInit( + pSMBr->u.extended_response.SecurityBlob, count, server); + if (rc != 1) + return -EINVAL; + } + + return 0; +} + +int +cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required) +{ + bool srv_sign_required = server->sec_mode & server->vals->signing_required; + bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled; + bool mnt_sign_enabled = global_secflags & CIFSSEC_MAY_SIGN; + + /* + * Is signing required by mnt options? If not then check + * global_secflags to see if it is there. + */ + if (!mnt_sign_required) + mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) == + CIFSSEC_MUST_SIGN); + + /* + * If signing is required then it's automatically enabled too, + * otherwise, check to see if the secflags allow it. + */ + mnt_sign_enabled = mnt_sign_required ? mnt_sign_required : + (global_secflags & CIFSSEC_MAY_SIGN); + + /* If server requires signing, does client allow it? */ + if (srv_sign_required) { + if (!mnt_sign_enabled) { + cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!"); + return -ENOTSUPP; + } + server->sign = true; + } + + /* If client requires signing, does server allow it? 
*/ + if (mnt_sign_required) { + if (!srv_sign_enabled) { + cifs_dbg(VFS, "Server does not support signing!"); + return -ENOTSUPP; + } + server->sign = true; + } + + return 0; +} + +#ifdef CONFIG_CIFS_WEAK_PW_HASH +static int +decode_lanman_negprot_rsp(struct TCP_Server_Info *server, NEGOTIATE_RSP *pSMBr) +{ + __s16 tmp; + struct lanman_neg_rsp *rsp = (struct lanman_neg_rsp *)pSMBr; + + if (server->dialect != LANMAN_PROT && server->dialect != LANMAN2_PROT) + return -EOPNOTSUPP; + + server->sec_mode = le16_to_cpu(rsp->SecurityMode); + server->maxReq = min_t(unsigned int, + le16_to_cpu(rsp->MaxMpxCount), + cifs_max_pending); + set_credits(server, server->maxReq); + server->maxBuf = le16_to_cpu(rsp->MaxBufSize); + server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs); + /* even though we do not use raw we might as well set this + accurately, in case we ever find a need for it */ + if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) { + server->max_rw = 0xFF00; + server->capabilities = CAP_MPX_MODE | CAP_RAW_MODE; + } else { + server->max_rw = 0;/* do not need to use raw anyway */ + server->capabilities = CAP_MPX_MODE; + } + tmp = (__s16)le16_to_cpu(rsp->ServerTimeZone); + if (tmp == -1) { + /* OS/2 often does not set timezone therefore + * we must use server time to calc time zone. + * Could deviate slightly from the right zone. + * Smallest defined timezone difference is 15 minutes + * (i.e. Nepal). Rounding up/down is done to match + * this requirement. + */ + int val, seconds, remain, result; + struct timespec ts, utc; + utc = CURRENT_TIME; + ts = cnvrtDosUnixTm(rsp->SrvTime.Date, + rsp->SrvTime.Time, 0); + cifs_dbg(FYI, "SrvTime %d sec since 1970 (utc: %d) diff: %d\n", + (int)ts.tv_sec, (int)utc.tv_sec, + (int)(utc.tv_sec - ts.tv_sec)); + val = (int)(utc.tv_sec - ts.tv_sec); + seconds = abs(val); + result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ; + remain = seconds % MIN_TZ_ADJ; + if (remain >= (MIN_TZ_ADJ / 2)) + result += MIN_TZ_ADJ; + if (val < 0) + result = -result; + server->timeAdj = result; + } else { + server->timeAdj = (int)tmp; + server->timeAdj *= 60; /* also in seconds */ + } + cifs_dbg(FYI, "server->timeAdj: %d seconds\n", server->timeAdj); + + + /* BB get server time for time conversions and add + code to use it and timezone since this is not UTC */ + + if (rsp->EncryptionKeyLength == + cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) { + memcpy(server->cryptkey, rsp->EncryptionKey, + CIFS_CRYPTO_KEY_SIZE); + } else if (server->sec_mode & SECMODE_PW_ENCRYPT) { + return -EIO; /* need cryptkey unless plain text */ + } + + cifs_dbg(FYI, "LANMAN negotiated\n"); + return 0; +} +#else +static inline int +decode_lanman_negprot_rsp(struct TCP_Server_Info *server, NEGOTIATE_RSP *pSMBr) +{ + cifs_dbg(VFS, "mount failed, cifs module not built with CIFS_WEAK_PW_HASH support\n"); + return -EOPNOTSUPP; +} +#endif + +static bool +should_set_ext_sec_flag(enum securityEnum sectype) +{ + switch (sectype) { + case RawNTLMSSP: + case Kerberos: + return true; + case Unspecified: + if (global_secflags & + (CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP)) + return true; + /* Fallthrough */ + default: + return false; + } +} + int CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses) { @@ -375,41 +554,24 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses) int rc = 0; int bytes_returned; int i; - struct TCP_Server_Info *server; + struct TCP_Server_Info *server = ses->server; u16 count; - unsigned int secFlags; - if (ses->server) - server = ses->server; - else { - rc = -EIO; - return rc; + if 
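/*
 * Aside: cifs_enable_signing() above reduces to a small policy cross-check
 * between client (mount option or secflags) and server (NEGOTIATE sec_mode)
 * requirements. A distilled restatement, not the kernel function itself:
 */
static int enable_signing(bool clnt_required, bool clnt_allowed,
			  bool srv_required, bool srv_enabled, bool *sign)
{
	if (srv_required && !clnt_allowed)
		return -ENOTSUPP;	/* server demands signing, client forbids it */
	if (clnt_required && !srv_enabled)
		return -ENOTSUPP;	/* client demands signing, server lacks it */
	*sign = clnt_required || srv_required;
	return 0;
}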
(!server) { + WARN(1, "%s: server is NULL!\n", __func__); + return -EIO; } + rc = smb_init(SMB_COM_NEGOTIATE, 0, NULL /* no tcon yet */ , (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; - /* if any of auth flags (ie not sign or seal) are overriden use them */ - if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL))) - secFlags = ses->overrideSecFlg; /* BB FIXME fix sign flags? */ - else /* if override flags set only sign/seal OR them with global auth */ - secFlags = global_secflags | ses->overrideSecFlg; - - cifs_dbg(FYI, "secFlags 0x%x\n", secFlags); - pSMB->hdr.Mid = get_next_mid(server); pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS); - if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) - pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; - else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_KRB5) { - cifs_dbg(FYI, "Kerberos only mechanism, enable extended security\n"); - pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; - } else if ((secFlags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP) - pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; - else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_NTLMSSP) { - cifs_dbg(FYI, "NTLMSSP only mechanism, enable extended security\n"); + if (should_set_ext_sec_flag(ses->sectype)) { + cifs_dbg(FYI, "Requesting extended security."); pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; } @@ -436,127 +598,21 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses) could not negotiate a common dialect */ rc = -EOPNOTSUPP; goto neg_err_exit; -#ifdef CONFIG_CIFS_WEAK_PW_HASH - } else if ((pSMBr->hdr.WordCount == 13) - && ((server->dialect == LANMAN_PROT) - || (server->dialect == LANMAN2_PROT))) { - __s16 tmp; - struct lanman_neg_rsp *rsp = (struct lanman_neg_rsp *)pSMBr; - - if ((secFlags & CIFSSEC_MAY_LANMAN) || - (secFlags & CIFSSEC_MAY_PLNTXT)) - server->secType = LANMAN; - else { - cifs_dbg(VFS, "mount failed weak security disabled in /proc/fs/cifs/SecurityFlags\n"); - rc = -EOPNOTSUPP; - goto neg_err_exit; - } - server->sec_mode = le16_to_cpu(rsp->SecurityMode); - server->maxReq = min_t(unsigned int, - le16_to_cpu(rsp->MaxMpxCount), - cifs_max_pending); - set_credits(server, server->maxReq); - server->maxBuf = le16_to_cpu(rsp->MaxBufSize); - server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs); - /* even though we do not use raw we might as well set this - accurately, in case we ever find a need for it */ - if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) { - server->max_rw = 0xFF00; - server->capabilities = CAP_MPX_MODE | CAP_RAW_MODE; - } else { - server->max_rw = 0;/* do not need to use raw anyway */ - server->capabilities = CAP_MPX_MODE; - } - tmp = (__s16)le16_to_cpu(rsp->ServerTimeZone); - if (tmp == -1) { - /* OS/2 often does not set timezone therefore - * we must use server time to calc time zone. - * Could deviate slightly from the right zone. - * Smallest defined timezone difference is 15 minutes - * (i.e. Nepal). Rounding up/down is done to match - * this requirement. 
- */ - int val, seconds, remain, result; - struct timespec ts, utc; - utc = CURRENT_TIME; - ts = cnvrtDosUnixTm(rsp->SrvTime.Date, - rsp->SrvTime.Time, 0); - cifs_dbg(FYI, "SrvTime %d sec since 1970 (utc: %d) diff: %d\n", - (int)ts.tv_sec, (int)utc.tv_sec, - (int)(utc.tv_sec - ts.tv_sec)); - val = (int)(utc.tv_sec - ts.tv_sec); - seconds = abs(val); - result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ; - remain = seconds % MIN_TZ_ADJ; - if (remain >= (MIN_TZ_ADJ / 2)) - result += MIN_TZ_ADJ; - if (val < 0) - result = -result; - server->timeAdj = result; - } else { - server->timeAdj = (int)tmp; - server->timeAdj *= 60; /* also in seconds */ - } - cifs_dbg(FYI, "server->timeAdj: %d seconds\n", server->timeAdj); - - - /* BB get server time for time conversions and add - code to use it and timezone since this is not UTC */ - - if (rsp->EncryptionKeyLength == - cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) { - memcpy(ses->server->cryptkey, rsp->EncryptionKey, - CIFS_CRYPTO_KEY_SIZE); - } else if (server->sec_mode & SECMODE_PW_ENCRYPT) { - rc = -EIO; /* need cryptkey unless plain text */ - goto neg_err_exit; - } - - cifs_dbg(FYI, "LANMAN negotiated\n"); - /* we will not end up setting signing flags - as no signing - was in LANMAN and server did not return the flags on */ - goto signing_check; -#else /* weak security disabled */ } else if (pSMBr->hdr.WordCount == 13) { - cifs_dbg(VFS, "mount failed, cifs module not built with CIFS_WEAK_PW_HASH support\n"); - rc = -EOPNOTSUPP; -#endif /* WEAK_PW_HASH */ - goto neg_err_exit; + server->negflavor = CIFS_NEGFLAVOR_LANMAN; + rc = decode_lanman_negprot_rsp(server, pSMBr); + goto signing_check; } else if (pSMBr->hdr.WordCount != 17) { /* unknown wct */ rc = -EOPNOTSUPP; goto neg_err_exit; } - /* else wct == 17 NTLM */ + /* else wct == 17, NTLM or better */ + server->sec_mode = pSMBr->SecurityMode; if ((server->sec_mode & SECMODE_USER) == 0) cifs_dbg(FYI, "share mode security\n"); - if ((server->sec_mode & SECMODE_PW_ENCRYPT) == 0) -#ifdef CONFIG_CIFS_WEAK_PW_HASH - if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0) -#endif /* CIFS_WEAK_PW_HASH */ - cifs_dbg(VFS, "Server requests plain text password but client support disabled\n"); - - if ((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2) - server->secType = NTLMv2; - else if (secFlags & CIFSSEC_MAY_NTLM) - server->secType = NTLM; - else if (secFlags & CIFSSEC_MAY_NTLMV2) - server->secType = NTLMv2; - else if (secFlags & CIFSSEC_MAY_KRB5) - server->secType = Kerberos; - else if (secFlags & CIFSSEC_MAY_NTLMSSP) - server->secType = RawNTLMSSP; - else if (secFlags & CIFSSEC_MAY_LANMAN) - server->secType = LANMAN; - else { - rc = -EOPNOTSUPP; - cifs_dbg(VFS, "Invalid security type\n"); - goto neg_err_exit; - } - /* else ... any others ...? 
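/*
 * Worked example for the MIN_TZ_ADJ (15 * 60 = 900 s) rounding in the
 * LANMAN timezone fallback above, assuming the server clock puts val at
 * -20300 s (about -5h38m):
 *
 *   seconds = 20300
 *   (20300 / 900) * 900 = 19800; remainder 500 >= 450, so round up: 20700
 *   val < 0, so timeAdj = -20700 s = -5h45m (the Nepal-style 15-min grid)
 */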
*/ - /* one byte, so no need to convert this or EncryptionKeyLen from little endian */ server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount), @@ -569,90 +625,26 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses) server->capabilities = le32_to_cpu(pSMBr->Capabilities); server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone); server->timeAdj *= 60; + if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) { + server->negflavor = CIFS_NEGFLAVOR_UNENCAP; memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey, CIFS_CRYPTO_KEY_SIZE); } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC || server->capabilities & CAP_EXTENDED_SECURITY) && (pSMBr->EncryptionKeyLength == 0)) { - /* decode security blob */ - count = get_bcc(&pSMBr->hdr); - if (count < 16) { - rc = -EIO; - goto neg_err_exit; - } - spin_lock(&cifs_tcp_ses_lock); - if (server->srv_count > 1) { - spin_unlock(&cifs_tcp_ses_lock); - if (memcmp(server->server_GUID, - pSMBr->u.extended_response. - GUID, 16) != 0) { - cifs_dbg(FYI, "server UID changed\n"); - memcpy(server->server_GUID, - pSMBr->u.extended_response.GUID, - 16); - } - } else { - spin_unlock(&cifs_tcp_ses_lock); - memcpy(server->server_GUID, - pSMBr->u.extended_response.GUID, 16); - } - - if (count == 16) { - server->secType = RawNTLMSSP; - } else { - rc = decode_negTokenInit(pSMBr->u.extended_response. - SecurityBlob, count - 16, - server); - if (rc == 1) - rc = 0; - else - rc = -EINVAL; - if (server->secType == Kerberos) { - if (!server->sec_kerberos && - !server->sec_mskerberos) - rc = -EOPNOTSUPP; - } else if (server->secType == RawNTLMSSP) { - if (!server->sec_ntlmssp) - rc = -EOPNOTSUPP; - } else - rc = -EOPNOTSUPP; - } + server->negflavor = CIFS_NEGFLAVOR_EXTENDED; + rc = decode_ext_sec_blob(ses, pSMBr); } else if (server->sec_mode & SECMODE_PW_ENCRYPT) { rc = -EIO; /* no crypt key only if plain text pwd */ - goto neg_err_exit; - } else - server->capabilities &= ~CAP_EXTENDED_SECURITY; - -#ifdef CONFIG_CIFS_WEAK_PW_HASH -signing_check: -#endif - if ((secFlags & CIFSSEC_MAY_SIGN) == 0) { - /* MUST_SIGN already includes the MAY_SIGN FLAG - so if this is zero it means that signing is disabled */ - cifs_dbg(FYI, "Signing disabled\n"); - if (server->sec_mode & SECMODE_SIGN_REQUIRED) { - cifs_dbg(VFS, "Server requires packet signing to be enabled in /proc/fs/cifs/SecurityFlags\n"); - rc = -EOPNOTSUPP; - } - server->sec_mode &= - ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); - } else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) { - /* signing required */ - cifs_dbg(FYI, "Must sign - secFlags 0x%x\n", secFlags); - if ((server->sec_mode & - (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) { - cifs_dbg(VFS, "signing required but server lacks support\n"); - rc = -EOPNOTSUPP; - } else - server->sec_mode |= SECMODE_SIGN_REQUIRED; } else { - /* signing optional ie CIFSSEC_MAY_SIGN */ - if ((server->sec_mode & SECMODE_SIGN_REQUIRED) == 0) - server->sec_mode &= - ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); + server->negflavor = CIFS_NEGFLAVOR_UNENCAP; + server->capabilities &= ~CAP_EXTENDED_SECURITY; } +signing_check: + if (!rc) + rc = cifs_enable_signing(server, ses->sign); neg_err_exit: cifs_buf_release(pSMB); @@ -777,9 +769,8 @@ CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses) pSMB->hdr.Mid = get_next_mid(ses->server); - if (ses->server->sec_mode & - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) - pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; + if (ses->server->sign) + pSMB->hdr.Flags2 |= 
SMBFLG2_SECURITY_SIGNATURE; pSMB->hdr.Uid = ses->Suid; @@ -1540,8 +1531,7 @@ cifs_readv_callback(struct mid_q_entry *mid) switch (mid->mid_state) { case MID_RESPONSE_RECEIVED: /* result already set, check signature */ - if (server->sec_mode & - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { + if (server->sign) { int rc = 0; rc = cifs_verify_signature(&rqst, server, @@ -3940,6 +3930,7 @@ QFileInfoRetry: pSMB->Pad = 0; pSMB->Fid = netfid; inc_rfc1001_len(pSMB, byte_count); + pSMB->t2.ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); @@ -4108,6 +4099,7 @@ UnixQFileInfoRetry: pSMB->Pad = 0; pSMB->Fid = netfid; inc_rfc1001_len(pSMB, byte_count); + pSMB->t2.ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); @@ -4794,11 +4786,8 @@ getDFSRetry: strncpy(pSMB->RequestFileName, search_name, name_len); } - if (ses->server) { - if (ses->server->sec_mode & - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) - pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; - } + if (ses->server && ses->server->sign) + pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; pSMB->hdr.Uid = ses->Suid; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index e3bc39bb9d12..afcb8a1a33b7 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -85,7 +85,7 @@ enum { Opt_acl, Opt_noacl, Opt_locallease, Opt_sign, Opt_seal, Opt_noac, Opt_fsc, Opt_mfsymlinks, - Opt_multiuser, Opt_sloppy, + Opt_multiuser, Opt_sloppy, Opt_nosharesock, /* Mount options which take numeric value */ Opt_backupuid, Opt_backupgid, Opt_uid, @@ -165,6 +165,7 @@ static const match_table_t cifs_mount_option_tokens = { { Opt_mfsymlinks, "mfsymlinks" }, { Opt_multiuser, "multiuser" }, { Opt_sloppy, "sloppy" }, + { Opt_nosharesock, "nosharesock" }, { Opt_backupuid, "backupuid=%s" }, { Opt_backupgid, "backupgid=%s" }, @@ -275,6 +276,7 @@ static const match_table_t cifs_smb_version_tokens = { { Smb_20, SMB20_VERSION_STRING}, { Smb_21, SMB21_VERSION_STRING }, { Smb_30, SMB30_VERSION_STRING }, + { Smb_302, SMB302_VERSION_STRING }, }; static int ip_connect(struct TCP_Server_Info *server); @@ -1024,44 +1026,48 @@ static int cifs_parse_security_flavors(char *value, substring_t args[MAX_OPT_ARGS]; + /* + * With mount options, the last one should win. Reset any existing + * settings back to default. 
+ */ + vol->sectype = Unspecified; + vol->sign = false; + switch (match_token(value, cifs_secflavor_tokens, args)) { - case Opt_sec_krb5: - vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_SIGN; - break; - case Opt_sec_krb5i: - vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MUST_SIGN; - break; case Opt_sec_krb5p: - /* vol->secFlg |= CIFSSEC_MUST_SEAL | CIFSSEC_MAY_KRB5; */ - cifs_dbg(VFS, "Krb5 cifs privacy not supported\n"); - break; - case Opt_sec_ntlmssp: - vol->secFlg |= CIFSSEC_MAY_NTLMSSP; + cifs_dbg(VFS, "sec=krb5p is not supported!\n"); + return 1; + case Opt_sec_krb5i: + vol->sign = true; + /* Fallthrough */ + case Opt_sec_krb5: + vol->sectype = Kerberos; break; case Opt_sec_ntlmsspi: - vol->secFlg |= CIFSSEC_MAY_NTLMSSP | CIFSSEC_MUST_SIGN; - break; - case Opt_ntlm: - /* ntlm is default so can be turned off too */ - vol->secFlg |= CIFSSEC_MAY_NTLM; + vol->sign = true; + /* Fallthrough */ + case Opt_sec_ntlmssp: + vol->sectype = RawNTLMSSP; break; case Opt_sec_ntlmi: - vol->secFlg |= CIFSSEC_MAY_NTLM | CIFSSEC_MUST_SIGN; - break; - case Opt_sec_ntlmv2: - vol->secFlg |= CIFSSEC_MAY_NTLMV2; + vol->sign = true; + /* Fallthrough */ + case Opt_ntlm: + vol->sectype = NTLM; break; case Opt_sec_ntlmv2i: - vol->secFlg |= CIFSSEC_MAY_NTLMV2 | CIFSSEC_MUST_SIGN; + vol->sign = true; + /* Fallthrough */ + case Opt_sec_ntlmv2: + vol->sectype = NTLMv2; break; #ifdef CONFIG_CIFS_WEAK_PW_HASH case Opt_sec_lanman: - vol->secFlg |= CIFSSEC_MAY_LANMAN; + vol->sectype = LANMAN; break; #endif case Opt_sec_none: vol->nullauth = 1; - vol->secFlg |= CIFSSEC_MAY_NTLM; break; default: cifs_dbg(VFS, "bad security option: %s\n", value); @@ -1119,6 +1125,10 @@ cifs_parse_smb_version(char *value, struct smb_vol *vol) vol->ops = &smb30_operations; vol->vals = &smb30_values; break; + case Smb_302: + vol->ops = &smb30_operations; /* currently identical with 3.0 */ + vol->vals = &smb302_values; + break; #endif default: cifs_dbg(VFS, "Unknown vers= option specified: %s\n", value); @@ -1424,7 +1434,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, vol->local_lease = 1; break; case Opt_sign: - vol->secFlg |= CIFSSEC_MUST_SIGN; + vol->sign = true; break; case Opt_seal: /* we do not do the following in secFlags because seal @@ -1455,6 +1465,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, case Opt_sloppy: sloppy = true; break; + case Opt_nosharesock: + vol->nosharesock = true; + break; /* Numeric Values */ case Opt_backupuid: @@ -1978,47 +1991,21 @@ match_address(struct TCP_Server_Info *server, struct sockaddr *addr, static bool match_security(struct TCP_Server_Info *server, struct smb_vol *vol) { - unsigned int secFlags; - - if (vol->secFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL))) - secFlags = vol->secFlg; - else - secFlags = global_secflags | vol->secFlg; - - switch (server->secType) { - case LANMAN: - if (!(secFlags & (CIFSSEC_MAY_LANMAN|CIFSSEC_MAY_PLNTXT))) - return false; - break; - case NTLMv2: - if (!(secFlags & CIFSSEC_MAY_NTLMV2)) - return false; - break; - case NTLM: - if (!(secFlags & CIFSSEC_MAY_NTLM)) - return false; - break; - case Kerberos: - if (!(secFlags & CIFSSEC_MAY_KRB5)) - return false; - break; - case RawNTLMSSP: - if (!(secFlags & CIFSSEC_MAY_NTLMSSP)) - return false; - break; - default: - /* shouldn't happen */ + /* + * The select_sectype function should either return the vol->sectype + * that was specified, or "Unspecified" if that sectype was not + * compatible with the given NEGOTIATE request. 
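/*
 * Aside: with the parsing rework above, "sec=" is last-one-wins (each hit
 * resets sectype/sign first) and the trailing "i" variants simply set
 * vol->sign before falling through to the base flavor. Hypothetical mount
 * option combinations to illustrate:
 *
 *   -o sec=ntlmv2i           -> sectype = NTLMv2,     sign = true
 *   -o sec=krb5,sec=ntlmssp  -> sectype = RawNTLMSSP, sign = false
 *   -o sec=ntlm,sign         -> sectype = NTLM,       sign = true
 */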
+ */ + if (select_sectype(server, vol->sectype) == Unspecified) return false; - } - /* now check if signing mode is acceptable */ - if ((secFlags & CIFSSEC_MAY_SIGN) == 0 && - (server->sec_mode & SECMODE_SIGN_REQUIRED)) - return false; - else if (((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) && - (server->sec_mode & - (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)) == 0) - return false; + /* + * Now check if signing mode is acceptable. No need to check + * global_secflags at this point since if MUST_SIGN is set then + * the server->sign had better be too. + */ + if (vol->sign && !server->sign) + return false; return true; } @@ -2027,6 +2014,9 @@ static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol) { struct sockaddr *addr = (struct sockaddr *)&vol->dstaddr; + if (vol->nosharesock) + return 0; + if ((server->vals != vol->vals) || (server->ops != vol->ops)) return 0; @@ -2216,7 +2206,11 @@ out_err: static int match_session(struct cifs_ses *ses, struct smb_vol *vol) { - switch (ses->server->secType) { + if (vol->sectype != Unspecified && + vol->sectype != ses->sectype) + return 0; + + switch (ses->sectype) { case Kerberos: if (!uid_eq(vol->cred_uid, ses->cred_uid)) return 0; @@ -2493,7 +2487,8 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) ses->cred_uid = volume_info->cred_uid; ses->linux_uid = volume_info->linux_uid; - ses->overrideSecFlg = volume_info->secFlg; + ses->sectype = volume_info->sectype; + ses->sign = volume_info->sign; mutex_lock(&ses->session_mutex); rc = cifs_negotiate_protocol(xid, ses); @@ -3656,7 +3651,7 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses, NTLMv2 password here) */ #ifdef CONFIG_CIFS_WEAK_PW_HASH if ((global_secflags & CIFSSEC_MAY_LANMAN) && - (ses->server->secType == LANMAN)) + (ses->sectype == LANMAN)) calc_lanman_hash(tcon->password, ses->server->cryptkey, ses->server->sec_mode & SECMODE_PW_ENCRYPT ? 
true : false, @@ -3674,8 +3669,7 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses, } } - if (ses->server->sec_mode & - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) + if (ses->server->sign) smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; if (ses->capabilities & CAP_STATUS32) { @@ -3738,7 +3732,7 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses, } bcc_ptr += length + 1; bytes_left -= (length + 1); - strncpy(tcon->treeName, tree, MAX_TREE_SIZE); + strlcpy(tcon->treeName, tree, sizeof(tcon->treeName)); /* mostly informational -- no need to fail on error here */ kfree(tcon->nativeFileSystem); @@ -3827,7 +3821,6 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, int rc = -ENOSYS; struct TCP_Server_Info *server = ses->server; - ses->flags = 0; ses->capabilities = server->capabilities; if (linuxExtEnabled == 0) ses->capabilities &= (~server->vals->cap_unix); @@ -3848,6 +3841,8 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, server->sequence_number = 0x2; server->session_estab = true; ses->auth_key.response = NULL; + if (server->ops->generate_signingkey) + server->ops->generate_signingkey(server); } mutex_unlock(&server->srv_mutex); @@ -3870,23 +3865,11 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, static int cifs_set_vol_auth(struct smb_vol *vol, struct cifs_ses *ses) { - switch (ses->server->secType) { - case Kerberos: - vol->secFlg = CIFSSEC_MUST_KRB5; + vol->sectype = ses->sectype; + + /* krb5 is special, since we don't need username or pw */ + if (vol->sectype == Kerberos) return 0; - case NTLMv2: - vol->secFlg = CIFSSEC_MUST_NTLMV2; - break; - case NTLM: - vol->secFlg = CIFSSEC_MUST_NTLM; - break; - case RawNTLMSSP: - vol->secFlg = CIFSSEC_MUST_NTLMSSP; - break; - case LANMAN: - vol->secFlg = CIFSSEC_MUST_LANMAN; - break; - } return cifs_set_cifscreds(vol, ses); } @@ -3912,6 +3895,8 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) vol_info->nocase = master_tcon->nocase; vol_info->local_lease = master_tcon->local_lease; vol_info->no_linux_ext = !master_tcon->unix_ext; + vol_info->sectype = master_tcon->ses->sectype; + vol_info->sign = master_tcon->ses->sign; rc = cifs_set_vol_auth(vol_info, master_tcon->ses); if (rc) { diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 5699b5036ed8..5175aebf6737 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -822,8 +822,7 @@ const struct dentry_operations cifs_dentry_ops = { /* d_delete: cifs_d_delete, */ /* not needed except for debugging */ }; -static int cifs_ci_hash(const struct dentry *dentry, const struct inode *inode, - struct qstr *q) +static int cifs_ci_hash(const struct dentry *dentry, struct qstr *q) { struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls; unsigned long hash; @@ -838,12 +837,10 @@ static int cifs_ci_hash(const struct dentry *dentry, const struct inode *inode, return 0; } -static int cifs_ci_compare(const struct dentry *parent, - const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +static int cifs_ci_compare(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { - struct nls_table *codepage = CIFS_SB(pinode->i_sb)->local_nls; + struct nls_table *codepage = CIFS_SB(parent->d_sb)->local_nls; if ((name->len == len) && (nls_strnicmp(codepage, name->name, str, len) == 0)) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 48b29d24c9f4..91d8629e69a2 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -999,7 +999,7 @@ try_again: rc = 
wait_event_interruptible(flock->fl_wait, !flock->fl_next); if (!rc) goto try_again; - locks_delete_block(flock); + posix_unblock_lock(flock); } return rc; } @@ -1092,6 +1092,7 @@ struct lock_to_push { static int cifs_push_posix_locks(struct cifsFileInfo *cfile) { + struct inode *inode = cfile->dentry->d_inode; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct file_lock *flock, **before; unsigned int count = 0, i = 0; @@ -1102,12 +1103,12 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) xid = get_xid(); - lock_flocks(); - cifs_for_each_lock(cfile->dentry->d_inode, before) { + spin_lock(&inode->i_lock); + cifs_for_each_lock(inode, before) { if ((*before)->fl_flags & FL_POSIX) count++; } - unlock_flocks(); + spin_unlock(&inode->i_lock); INIT_LIST_HEAD(&locks_to_send); @@ -1126,8 +1127,8 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) } el = locks_to_send.next; - lock_flocks(); - cifs_for_each_lock(cfile->dentry->d_inode, before) { + spin_lock(&inode->i_lock); + cifs_for_each_lock(inode, before) { flock = *before; if ((flock->fl_flags & FL_POSIX) == 0) continue; @@ -1152,7 +1153,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) lck->offset = flock->fl_start; el = el->next; } - unlock_flocks(); + spin_unlock(&inode->i_lock); list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { int stored_rc; @@ -3546,11 +3547,12 @@ static int cifs_release_page(struct page *page, gfp_t gfp) return cifs_fscache_release_page(page, gfp); } -static void cifs_invalidate_page(struct page *page, unsigned long offset) +static void cifs_invalidate_page(struct page *page, unsigned int offset, + unsigned int length) { struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host); - if (offset == 0) + if (offset == 0 && length == PAGE_CACHE_SIZE) cifs_fscache_invalidate_page(page, &cifsi->vfs_inode); } diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 1bec014779fd..f7d4b2285efe 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -267,8 +267,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , if (treeCon->nocase) buffer->Flags |= SMBFLG_CASELESS; if ((treeCon->ses) && (treeCon->ses->server)) - if (treeCon->ses->server->sec_mode & - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) + if (treeCon->ses->server->sign) buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; } diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 770d5a9781c1..ab8778469394 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -126,6 +126,22 @@ out: dput(dentry); } +/* + * Is it possible that this directory might turn out to be a DFS referral + * once we go to try and use it? + */ +static bool +cifs_dfs_is_possible(struct cifs_sb_info *cifs_sb) +{ +#ifdef CONFIG_CIFS_DFS_UPCALL + struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); + + if (tcon->Flags & SMB_SHARE_IS_IN_DFS) + return true; +#endif + return false; +} + static void cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb) { @@ -135,6 +151,19 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb) if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode; fattr->cf_dtype = DT_DIR; + /* + * Windows CIFS servers generally make DFS referrals look + * like directories in FIND_* responses with the reparse + * attribute flag also set (since DFS junctions are + * reparse points). 
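+ * The reparse bit alone is not conclusive, since other kinds of
+ * reparse points (e.g. symlinks) are reported the same way.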
We must revalidate at least these + * directory inodes before trying to use them (if + * they are DFS we will get PATH_NOT_COVERED back + * when queried directly and can then try to connect + * to the DFS target) + */ + if (cifs_dfs_is_possible(cifs_sb) && + (fattr->cf_cifsattrs & ATTR_REPARSE)) + fattr->cf_flags |= CIFS_FATTR_NEED_REVAL; } else { fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode; fattr->cf_dtype = DT_REG; @@ -537,14 +566,14 @@ static int cifs_save_resume_key(const char *current_entry, * every entry (do not increment for . or .. entry). */ static int -find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, +find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, struct file *file, char **current_entry, int *num_to_ret) { __u16 search_flags; int rc = 0; int pos_in_buf = 0; loff_t first_entry_in_buffer; - loff_t index_to_find = file->f_pos; + loff_t index_to_find = pos; struct cifsFileInfo *cfile = file->private_data; struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); struct TCP_Server_Info *server = tcon->ses->server; @@ -659,8 +688,9 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, return rc; } -static int cifs_filldir(char *find_entry, struct file *file, filldir_t filldir, - void *dirent, char *scratch_buf, unsigned int max_len) +static int cifs_filldir(char *find_entry, struct file *file, + struct dir_context *ctx, + char *scratch_buf, unsigned int max_len) { struct cifsFileInfo *file_info = file->private_data; struct super_block *sb = file->f_path.dentry->d_sb; @@ -740,13 +770,11 @@ static int cifs_filldir(char *find_entry, struct file *file, filldir_t filldir, cifs_prime_dcache(file->f_dentry, &name, &fattr); ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid); - rc = filldir(dirent, name.name, name.len, file->f_pos, ino, - fattr.cf_dtype); - return rc; + return !dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype); } -int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) +int cifs_readdir(struct file *file, struct dir_context *ctx) { int rc = 0; unsigned int xid; @@ -772,103 +800,86 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) goto rddir2_exit; } - switch ((int) file->f_pos) { - case 0: - if (filldir(direntry, ".", 1, file->f_pos, - file_inode(file)->i_ino, DT_DIR) < 0) { - cifs_dbg(VFS, "Filldir for current dir failed\n"); - rc = -ENOMEM; - break; - } - file->f_pos++; - case 1: - if (filldir(direntry, "..", 2, file->f_pos, - parent_ino(file->f_path.dentry), DT_DIR) < 0) { - cifs_dbg(VFS, "Filldir for parent dir failed\n"); - rc = -ENOMEM; - break; - } - file->f_pos++; - default: - /* 1) If search is active, - is in current search buffer? 
- if it before then restart search - if after then keep searching till find it */ - - if (file->private_data == NULL) { - rc = -EINVAL; - free_xid(xid); - return rc; - } - cifsFile = file->private_data; - if (cifsFile->srch_inf.endOfSearch) { - if (cifsFile->srch_inf.emptyDir) { - cifs_dbg(FYI, "End of search, empty dir\n"); - rc = 0; - break; - } - } /* else { - cifsFile->invalidHandle = true; - tcon->ses->server->close(xid, tcon, &cifsFile->fid); - } */ + if (!dir_emit_dots(file, ctx)) + goto rddir2_exit; - tcon = tlink_tcon(cifsFile->tlink); - rc = find_cifs_entry(xid, tcon, file, ¤t_entry, - &num_to_fill); - if (rc) { - cifs_dbg(FYI, "fce error %d\n", rc); - goto rddir2_exit; - } else if (current_entry != NULL) { - cifs_dbg(FYI, "entry %lld found\n", file->f_pos); - } else { - cifs_dbg(FYI, "could not find entry\n"); + /* 1) If search is active, + is in current search buffer? + if it before then restart search + if after then keep searching till find it */ + + if (file->private_data == NULL) { + rc = -EINVAL; + goto rddir2_exit; + } + cifsFile = file->private_data; + if (cifsFile->srch_inf.endOfSearch) { + if (cifsFile->srch_inf.emptyDir) { + cifs_dbg(FYI, "End of search, empty dir\n"); + rc = 0; goto rddir2_exit; } - cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n", - num_to_fill, cifsFile->srch_inf.ntwrk_buf_start); - max_len = tcon->ses->server->ops->calc_smb_size( - cifsFile->srch_inf.ntwrk_buf_start); - end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len; - - tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL); - if (tmp_buf == NULL) { - rc = -ENOMEM; + } /* else { + cifsFile->invalidHandle = true; + tcon->ses->server->close(xid, tcon, &cifsFile->fid); + } */ + + tcon = tlink_tcon(cifsFile->tlink); + rc = find_cifs_entry(xid, tcon, ctx->pos, file, ¤t_entry, + &num_to_fill); + if (rc) { + cifs_dbg(FYI, "fce error %d\n", rc); + goto rddir2_exit; + } else if (current_entry != NULL) { + cifs_dbg(FYI, "entry %lld found\n", ctx->pos); + } else { + cifs_dbg(FYI, "could not find entry\n"); + goto rddir2_exit; + } + cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n", + num_to_fill, cifsFile->srch_inf.ntwrk_buf_start); + max_len = tcon->ses->server->ops->calc_smb_size( + cifsFile->srch_inf.ntwrk_buf_start); + end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len; + + tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL); + if (tmp_buf == NULL) { + rc = -ENOMEM; + goto rddir2_exit; + } + + for (i = 0; i < num_to_fill; i++) { + if (current_entry == NULL) { + /* evaluate whether this case is an error */ + cifs_dbg(VFS, "past SMB end, num to fill %d i %d\n", + num_to_fill, i); break; } - - for (i = 0; (i < num_to_fill) && (rc == 0); i++) { - if (current_entry == NULL) { - /* evaluate whether this case is an error */ - cifs_dbg(VFS, "past SMB end, num to fill %d i %d\n", - num_to_fill, i); - break; - } - /* - * if buggy server returns . and .. late do we want to - * check for that here? - */ - rc = cifs_filldir(current_entry, file, filldir, - direntry, tmp_buf, max_len); - if (rc == -EOVERFLOW) { + /* + * if buggy server returns . and .. late do we want to + * check for that here? 
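+ * (dir_emit_dots() above already emitted both and advanced
+ * ctx->pos past them, so a late duplicate from the server
+ * would currently be handed straight to the actor.)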
+ */ + rc = cifs_filldir(current_entry, file, ctx, + tmp_buf, max_len); + if (rc) { + if (rc > 0) rc = 0; - break; - } - - file->f_pos++; - if (file->f_pos == - cifsFile->srch_inf.index_of_last_entry) { - cifs_dbg(FYI, "last entry in buf at pos %lld %s\n", - file->f_pos, tmp_buf); - cifs_save_resume_key(current_entry, cifsFile); - break; - } else - current_entry = - nxt_dir_entry(current_entry, end_of_smb, - cifsFile->srch_inf.info_level); + break; } - kfree(tmp_buf); - break; - } /* end switch */ + + ctx->pos++; + if (ctx->pos == + cifsFile->srch_inf.index_of_last_entry) { + cifs_dbg(FYI, "last entry in buf at pos %lld %s\n", + ctx->pos, tmp_buf); + cifs_save_resume_key(current_entry, cifsFile); + break; + } else + current_entry = + nxt_dir_entry(current_entry, end_of_smb, + cifsFile->srch_inf.info_level); + } + kfree(tmp_buf); rddir2_exit: free_xid(xid); diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index f230571a7ab3..79358e341fd2 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -138,8 +138,7 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB) capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS | CAP_LARGE_WRITE_X | CAP_LARGE_READ_X; - if (ses->server->sec_mode & - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) + if (ses->server->sign) pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; if (ses->capabilities & CAP_UNICODE) { @@ -310,11 +309,10 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses, return; } -static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft, - struct cifs_ses *ses, - const struct nls_table *nls_cp) +static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft, + struct cifs_ses *ses, + const struct nls_table *nls_cp) { - int rc = 0; int len; char *bcc_ptr = *pbcc_area; @@ -322,24 +320,22 @@ static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft, len = strnlen(bcc_ptr, bleft); if (len >= bleft) - return rc; + return; kfree(ses->serverOS); ses->serverOS = kzalloc(len + 1, GFP_KERNEL); if (ses->serverOS) strncpy(ses->serverOS, bcc_ptr, len); - if (strncmp(ses->serverOS, "OS/2", 4) == 0) { + if (strncmp(ses->serverOS, "OS/2", 4) == 0) cifs_dbg(FYI, "OS/2 server\n"); - ses->flags |= CIFS_SES_OS2; - } bcc_ptr += len + 1; bleft -= len + 1; len = strnlen(bcc_ptr, bleft); if (len >= bleft) - return rc; + return; kfree(ses->serverNOS); @@ -352,7 +348,7 @@ static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft, len = strnlen(bcc_ptr, bleft); if (len > bleft) - return rc; + return; /* No domain field in LANMAN case. 
Domain is returned by old servers in the SMB negprot response */ @@ -360,8 +356,6 @@ static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft, but thus do return domain here we could add parsing for it later, but it is not very important */ cifs_dbg(FYI, "ascii: bytes left %d\n", bleft); - - return rc; } int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, @@ -432,8 +426,7 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; - if (ses->server->sec_mode & - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { + if (ses->server->sign) { flags |= NTLMSSP_NEGOTIATE_SIGN; if (!ses->server->session_estab) flags |= NTLMSSP_NEGOTIATE_KEY_XCH; @@ -471,8 +464,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer, NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; - if (ses->server->sec_mode & - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { + if (ses->server->sign) { flags |= NTLMSSP_NEGOTIATE_SIGN; if (!ses->server->session_estab) flags |= NTLMSSP_NEGOTIATE_KEY_XCH; @@ -558,6 +550,56 @@ setup_ntlmv2_ret: return rc; } +enum securityEnum +select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) +{ + switch (server->negflavor) { + case CIFS_NEGFLAVOR_EXTENDED: + switch (requested) { + case Kerberos: + case RawNTLMSSP: + return requested; + case Unspecified: + if (server->sec_ntlmssp && + (global_secflags & CIFSSEC_MAY_NTLMSSP)) + return RawNTLMSSP; + if ((server->sec_kerberos || server->sec_mskerberos) && + (global_secflags & CIFSSEC_MAY_KRB5)) + return Kerberos; + /* Fallthrough */ + default: + return Unspecified; + } + case CIFS_NEGFLAVOR_UNENCAP: + switch (requested) { + case NTLM: + case NTLMv2: + return requested; + case Unspecified: + if (global_secflags & CIFSSEC_MAY_NTLMV2) + return NTLMv2; + if (global_secflags & CIFSSEC_MAY_NTLM) + return NTLM; + /* Fallthrough */ + default: + return Unspecified; + } + case CIFS_NEGFLAVOR_LANMAN: + switch (requested) { + case LANMAN: + return requested; + case Unspecified: + if (global_secflags & CIFSSEC_MAY_LANMAN) + return LANMAN; + /* Fallthrough */ + default: + return Unspecified; + } + default: + return Unspecified; + } +} + int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *nls_cp) @@ -579,11 +621,18 @@ CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses, u16 blob_len; char *ntlmsspblob = NULL; - if (ses == NULL) + if (ses == NULL) { + WARN(1, "%s: ses == NULL!", __func__); return -EINVAL; + } - type = ses->server->secType; + type = select_sectype(ses->server, ses->sectype); cifs_dbg(FYI, "sess setup type %d\n", type); + if (type == Unspecified) { + cifs_dbg(VFS, "Unable to select appropriate authentication method!"); + return -EINVAL; + } + if (type == RawNTLMSSP) { /* if memory allocation is successful, caller of this function * frees it. @@ -643,8 +692,6 @@ ssetup_ntlmssp_authenticate: } bcc_ptr = str_area; - ses->flags &= ~CIFS_SES_LANMAN; - iov[1].iov_base = NULL; iov[1].iov_len = 0; @@ -668,7 +715,6 @@ ssetup_ntlmssp_authenticate: ses->server->sec_mode & SECMODE_PW_ENCRYPT ? 
true : false, lnm_session_key); - ses->flags |= CIFS_SES_LANMAN; memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE); bcc_ptr += CIFS_AUTH_RESP_SIZE; @@ -938,8 +984,7 @@ ssetup_ntlmssp_authenticate: } decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, nls_cp); } else { - rc = decode_ascii_ssetup(&bcc_ptr, bytes_remaining, - ses, nls_cp); + decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, nls_cp); } ssetup_exit: diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 3efdb9d5c0b8..e813f04511d8 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -449,8 +449,7 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info) * WRITEX header, not including the 4 byte RFC1001 length. */ if (!(server->capabilities & CAP_LARGE_WRITE_X) || - (!(server->capabilities & CAP_UNIX) && - (server->sec_mode & (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)))) + (!(server->capabilities & CAP_UNIX) && server->sign)) wsize = min_t(unsigned int, wsize, server->maxBuf - sizeof(WRITE_REQ) + 4); @@ -765,20 +764,14 @@ smb_set_file_info(struct inode *inode, const char *full_path, } tcon = tlink_tcon(tlink); - /* - * NT4 apparently returns success on this call, but it doesn't really - * work. - */ - if (!(tcon->ses->flags & CIFS_SES_NT4)) { - rc = CIFSSMBSetPathInfo(xid, tcon, full_path, buf, - cifs_sb->local_nls, + rc = CIFSSMBSetPathInfo(xid, tcon, full_path, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); - if (rc == 0) { - cinode->cifsAttrs = le32_to_cpu(buf->Attributes); - goto out; - } else if (rc != -EOPNOTSUPP && rc != -EINVAL) - goto out; + if (rc == 0) { + cinode->cifsAttrs = le32_to_cpu(buf->Attributes); + goto out; + } else if (rc != -EOPNOTSUPP && rc != -EINVAL) { + goto out; } cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n"); @@ -964,4 +957,6 @@ struct smb_version_values smb1_values = { .cap_nt_find = CAP_NT_SMBS | CAP_NT_FIND, .cap_large_files = CAP_LARGE_FILES, .oplock_read = OPLOCK_READ, + .signing_enabled = SECMODE_SIGN_ENABLED, + .signing_required = SECMODE_SIGN_REQUIRED, }; diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h index 7c0e2143e775..c38350851b08 100644 --- a/fs/cifs/smb2glob.h +++ b/fs/cifs/smb2glob.h @@ -54,5 +54,7 @@ #define SMB2_SIGNATURE_SIZE (16) #define SMB2_NTLMV2_SESSKEY_SIZE (16) #define SMB2_HMACSHA256_SIZE (32) +#define SMB2_CMACAES_SIZE (16) +#define SMB3_SIGNKEY_SIZE (16) #endif /* _SMB2_GLOB_H */ diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 10383d8c015b..b0c43345cd98 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -266,6 +266,10 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr) ((struct smb2_query_directory_rsp *)hdr)->OutputBufferLength); break; case SMB2_IOCTL: + *off = le32_to_cpu( + ((struct smb2_ioctl_rsp *)hdr)->OutputOffset); + *len = le32_to_cpu(((struct smb2_ioctl_rsp *)hdr)->OutputCount); + break; case SMB2_CHANGE_NOTIFY: default: /* BB FIXME for unimplemented cases above */ diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index f2e76f3b0c61..6d15cab95b99 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -281,6 +281,25 @@ smb2_clear_stats(struct cifs_tcon *tcon) } static void +smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon) +{ + seq_puts(m, "\n\tShare Capabilities:"); + if (tcon->capabilities & SMB2_SHARE_CAP_DFS) + seq_puts(m, " DFS,"); + if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY) + seq_puts(m, " CONTINUOUS AVAILABILITY,"); + if (tcon->capabilities & 
SMB2_SHARE_CAP_SCALEOUT) + seq_puts(m, " SCALEOUT,"); + if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER) + seq_puts(m, " CLUSTER,"); + if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC) + seq_puts(m, " ASYMMETRIC,"); + if (tcon->capabilities == 0) + seq_puts(m, " None"); + seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags); +} + +static void smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon) { #ifdef CONFIG_CIFS_STATS @@ -292,7 +311,6 @@ smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon) seq_printf(m, "\nSessionSetups: %d sent %d failed", atomic_read(&sent[SMB2_SESSION_SETUP_HE]), atomic_read(&failed[SMB2_SESSION_SETUP_HE])); -#define SMB2LOGOFF 0x0002 /* trivial request/resp */ seq_printf(m, "\nLogoffs: %d sent %d failed", atomic_read(&sent[SMB2_LOGOFF_HE]), atomic_read(&failed[SMB2_LOGOFF_HE])); @@ -645,6 +663,7 @@ struct smb_version_operations smb30_operations = { .dump_detail = smb2_dump_detail, .clear_stats = smb2_clear_stats, .print_stats = smb2_print_stats, + .dump_share_caps = smb2_dump_share_caps, .is_oplock_break = smb2_is_valid_oplock_break, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, @@ -690,6 +709,7 @@ struct smb_version_operations smb30_operations = { .get_lease_key = smb2_get_lease_key, .set_lease_key = smb2_set_lease_key, .new_lease_key = smb2_new_lease_key, + .generate_signingkey = generate_smb3signingkey, .calc_signature = smb3_calc_signature, }; @@ -709,6 +729,8 @@ struct smb_version_values smb20_values = { .cap_nt_find = SMB2_NT_FIND, .cap_large_files = SMB2_LARGE_FILES, .oplock_read = SMB2_OPLOCK_LEVEL_II, + .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED, + .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED, }; struct smb_version_values smb21_values = { @@ -727,6 +749,8 @@ struct smb_version_values smb21_values = { .cap_nt_find = SMB2_NT_FIND, .cap_large_files = SMB2_LARGE_FILES, .oplock_read = SMB2_OPLOCK_LEVEL_II, + .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED, + .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED, }; struct smb_version_values smb30_values = { @@ -745,4 +769,26 @@ struct smb_version_values smb30_values = { .cap_nt_find = SMB2_NT_FIND, .cap_large_files = SMB2_LARGE_FILES, .oplock_read = SMB2_OPLOCK_LEVEL_II, + .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED, + .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED, +}; + +struct smb_version_values smb302_values = { + .version_string = SMB302_VERSION_STRING, + .protocol_id = SMB302_PROT_ID, + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU, + .large_lock_type = 0, + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, + .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, + .header_size = sizeof(struct smb2_hdr), + .max_header_size = MAX_SMB2_HDR_SIZE, + .read_rsp_size = sizeof(struct smb2_read_rsp) - 1, + .lock_cmd = SMB2_LOCK, + .cap_unix = 0, + .cap_nt_find = SMB2_NT_FIND, + .cap_large_files = SMB2_LARGE_FILES, + .oplock_read = SMB2_OPLOCK_LEVEL_II, + .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED, + .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED, }; diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 2b95ce2b54e8..2b312e4eeaa6 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -1,7 +1,7 @@ /* * fs/cifs/smb2pdu.c * - * Copyright (C) International Business Machines Corp., 2009, 2012 + * Copyright (C) International 
Business Machines Corp., 2009, 2013 * Etersoft, 2012 * Author(s): Steve French (sfrench@us.ibm.com) * Pavel Shilovsky (pshilovsky@samba.org) 2012 @@ -108,19 +108,33 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ , if (!tcon) goto out; + /* BB FIXME when we do write > 64K add +1 for every 64K in req or rsp */ + /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */ + /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */ + if ((tcon->ses) && + (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) + hdr->CreditCharge = cpu_to_le16(1); + /* else CreditCharge MBZ */ + hdr->TreeId = tcon->tid; /* Uid is not converted */ if (tcon->ses) hdr->SessionId = tcon->ses->Suid; - /* BB check following DFS flags BB */ - /* BB do we have to add check for SHI1005_FLAGS_DFS_ROOT too? */ - if (tcon->share_flags & SHI1005_FLAGS_DFS) - hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; - /* BB how does SMB2 do case sensitive? */ - /* if (tcon->nocase) - hdr->Flags |= SMBFLG_CASELESS; */ - if (tcon->ses && tcon->ses->server && - (tcon->ses->server->sec_mode & SECMODE_SIGN_REQUIRED)) + + /* + * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have + * to pass the path on the Open SMB prefixed by \\server\share. + * Not sure when we would need to do the augmented path (if ever) and + * setting this flag breaks the SMB2 open operation since it is + * illegal to send an empty path name (without \\server\share prefix) + * when the DFS flag is set in the SMB open header. We could + * consider setting the flag on all operations other than open + * but it is safer to not set it for now. + */ +/* if (tcon->share_flags & SHI1005_FLAGS_DFS) + hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */ + + if (tcon->ses && tcon->ses->server && tcon->ses->server->sign) hdr->Flags |= SMB2_FLAGS_SIGNED; out: pdu->StructureSize2 = cpu_to_le16(parmsize); @@ -328,34 +342,22 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) struct kvec iov[1]; int rc = 0; int resp_buftype; - struct TCP_Server_Info *server; - unsigned int sec_flags; - u16 temp = 0; + struct TCP_Server_Info *server = ses->server; int blob_offset, blob_length; char *security_blob; int flags = CIFS_NEG_OP; cifs_dbg(FYI, "Negotiate protocol\n"); - if (ses->server) - server = ses->server; - else { - rc = -EIO; - return rc; + if (!server) { + WARN(1, "%s: server is NULL!\n", __func__); + return -EIO; } rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req); if (rc) return rc; - /* if any of auth flags (ie not sign or seal) are overriden use them */ - if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL))) - sec_flags = ses->overrideSecFlg; /* BB FIXME fix sign flags?*/ - else /* if override flags set only sign/seal OR them with global auth */ - sec_flags = global_secflags | ses->overrideSecFlg; - - cifs_dbg(FYI, "sec_flags 0x%x\n", sec_flags); - req->hdr.SessionId = 0; req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id); @@ -364,12 +366,12 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) inc_rfc1001_len(req, 2); /* only one of SMB2 signing flags may be set in SMB2 request */ - if ((sec_flags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) - temp = SMB2_NEGOTIATE_SIGNING_REQUIRED; - else if (sec_flags & CIFSSEC_MAY_SIGN) /* MAY_SIGN is a single flag */ - temp = SMB2_NEGOTIATE_SIGNING_ENABLED; - - req->SecurityMode = cpu_to_le16(temp); + if (ses->sign) + req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED); + else if (global_secflags & CIFSSEC_MAY_SIGN) + req->SecurityMode =
cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED); + else + req->SecurityMode = 0; req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities); @@ -399,6 +401,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) cifs_dbg(FYI, "negotiated smb2.1 dialect\n"); else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID)) cifs_dbg(FYI, "negotiated smb3.0 dialect\n"); + else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID)) + cifs_dbg(FYI, "negotiated smb3.02 dialect\n"); else { cifs_dbg(VFS, "Illegal dialect returned by server %d\n", le16_to_cpu(rsp->DialectRevision)); @@ -407,6 +411,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) } server->dialect = le16_to_cpu(rsp->DialectRevision); + /* SMB2 only has an extended negflavor */ + server->negflavor = CIFS_NEGFLAVOR_EXTENDED; server->maxBuf = le32_to_cpu(rsp->MaxTransactSize); server->max_read = le32_to_cpu(rsp->MaxReadSize); server->max_write = le32_to_cpu(rsp->MaxWriteSize); @@ -418,44 +424,22 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) security_blob = smb2_get_data_area_len(&blob_offset, &blob_length, &rsp->hdr); - if (blob_length == 0) { - cifs_dbg(VFS, "missing security blob on negprot\n"); - rc = -EIO; - goto neg_exit; - } - - cifs_dbg(FYI, "sec_flags 0x%x\n", sec_flags); - if ((sec_flags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) { - cifs_dbg(FYI, "Signing required\n"); - if (!(server->sec_mode & (SMB2_NEGOTIATE_SIGNING_REQUIRED | - SMB2_NEGOTIATE_SIGNING_ENABLED))) { - cifs_dbg(VFS, "signing required but server lacks support\n"); - rc = -EOPNOTSUPP; - goto neg_exit; - } - server->sec_mode |= SECMODE_SIGN_REQUIRED; - } else if (sec_flags & CIFSSEC_MAY_SIGN) { - cifs_dbg(FYI, "Signing optional\n"); - if (server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) { - cifs_dbg(FYI, "Server requires signing\n"); - server->sec_mode |= SECMODE_SIGN_REQUIRED; - } else { - server->sec_mode &= - ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); - } - } else { - cifs_dbg(FYI, "Signing disabled\n"); - if (server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) { - cifs_dbg(VFS, "Server requires packet signing to be enabled in /proc/fs/cifs/SecurityFlags\n"); - rc = -EOPNOTSUPP; - goto neg_exit; - } - server->sec_mode &= - ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); - } + /* + * See MS-SMB2 section 2.2.4: if no blob, client picks default which + * for us will be + * ses->sectype = RawNTLMSSP; + * but for time being this is our only auth choice so doesn't matter. + * We just found a server which sets blob length to zero expecting raw. 
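+ * Spelled out, the spec's fallback would amount to:
+ *	if (blob_length == 0)
+ *		ses->sectype = RawNTLMSSP;
+ * which is exactly what SMB2_sess_setup hardcodes today.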
+ */ + if (blob_length == 0) + cifs_dbg(FYI, "missing security blob on negprot\n"); + rc = cifs_enable_signing(server, ses->sign); #ifdef CONFIG_SMB2_ASN1 /* BB REMOVEME when updated asn1.c ready */ - rc = decode_neg_token_init(security_blob, blob_length, + if (rc) + goto neg_exit; + if (blob_length) + rc = decode_neg_token_init(security_blob, blob_length, &server->sec_type); if (rc == 1) rc = 0; @@ -480,9 +464,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, int rc = 0; int resp_buftype; __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */ - struct TCP_Server_Info *server; - unsigned int sec_flags; - u8 temp = 0; + struct TCP_Server_Info *server = ses->server; u16 blob_length = 0; char *security_blob; char *ntlmssp_blob = NULL; @@ -490,11 +472,9 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, cifs_dbg(FYI, "Session Setup\n"); - if (ses->server) - server = ses->server; - else { - rc = -EIO; - return rc; + if (!server) { + WARN(1, "%s: server is NULL!\n", __func__); + return -EIO; } /* @@ -505,7 +485,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, if (!ses->ntlmssp) return -ENOMEM; - ses->server->secType = RawNTLMSSP; + /* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */ + ses->sectype = RawNTLMSSP; ssetup_ntlmssp_authenticate: if (phase == NtLmChallenge) @@ -515,28 +496,19 @@ ssetup_ntlmssp_authenticate: if (rc) return rc; - /* if any of auth flags (ie not sign or seal) are overriden use them */ - if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL))) - sec_flags = ses->overrideSecFlg; /* BB FIXME fix sign flags?*/ - else /* if override flags set only sign/seal OR them with global auth */ - sec_flags = global_secflags | ses->overrideSecFlg; - - cifs_dbg(FYI, "sec_flags 0x%x\n", sec_flags); - req->hdr.SessionId = 0; /* First session, not a reauthenticate */ req->VcNumber = 0; /* MBZ */ /* to enable echos and oplocks */ req->hdr.CreditRequest = cpu_to_le16(3); /* only one of SMB2 signing flags may be set in SMB2 request */ - if ((sec_flags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) - temp = SMB2_NEGOTIATE_SIGNING_REQUIRED; - else if (ses->server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) - temp = SMB2_NEGOTIATE_SIGNING_REQUIRED; - else if (sec_flags & CIFSSEC_MAY_SIGN) /* MAY_SIGN is a single flag */ - temp = SMB2_NEGOTIATE_SIGNING_ENABLED; - - req->SecurityMode = temp; + if (server->sign) + req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED; + else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */ + req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED; + else + req->SecurityMode = 0; + req->Capabilities = 0; req->Channel = 0; /* MBZ */ @@ -679,7 +651,7 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses) /* since no tcon, smb2_init can not do this, so do here */ req->hdr.SessionId = ses->Suid; - if (server->sec_mode & SECMODE_SIGN_REQUIRED) + if (server->sign) req->hdr.Flags |= SMB2_FLAGS_SIGNED; rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0); @@ -788,11 +760,12 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, } tcon->share_flags = le32_to_cpu(rsp->ShareFlags); + tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */ tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess); tcon->tidStatus = CifsGood; tcon->need_reconnect = false; tcon->tid = rsp->hdr.TreeId; - strncpy(tcon->treeName, tree, MAX_TREE_SIZE); + strlcpy(tcon->treeName, tree, sizeof(tcon->treeName)); if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) && 
((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) @@ -1036,6 +1009,122 @@ creat_exit: return rc; } +/* + * SMB2 IOCTL is used for both IOCTLs and FSCTLs + */ +int +SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, + u64 volatile_fid, u32 opcode, bool is_fsctl, char *in_data, + u32 indatalen, char **out_data, u32 *plen /* returned data len */) +{ + struct smb2_ioctl_req *req; + struct smb2_ioctl_rsp *rsp; + struct TCP_Server_Info *server; + struct cifs_ses *ses = tcon->ses; + struct kvec iov[2]; + int resp_buftype; + int num_iovecs; + int rc = 0; + + cifs_dbg(FYI, "SMB2 IOCTL\n"); + + /* zero out returned data len, in case of error */ + if (plen) + *plen = 0; + + if (ses && (ses->server)) + server = ses->server; + else + return -EIO; + + rc = small_smb2_init(SMB2_IOCTL, tcon, (void **) &req); + if (rc) + return rc; + + req->CtlCode = cpu_to_le32(opcode); + req->PersistentFileId = persistent_fid; + req->VolatileFileId = volatile_fid; + + if (indatalen) { + req->InputCount = cpu_to_le32(indatalen); + /* do not set InputOffset if no input data */ + req->InputOffset = + cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer) - 4); + iov[1].iov_base = in_data; + iov[1].iov_len = indatalen; + num_iovecs = 2; + } else + num_iovecs = 1; + + req->OutputOffset = 0; + req->OutputCount = 0; /* MBZ */ + + /* + * Could increase MaxOutputResponse, but that would require more + * than one credit. Windows typically sets this smaller, but for some + * ioctls it may be useful to allow server to send more. No point + * limiting what the server can send as long as fits in one credit + */ + req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */ + + if (is_fsctl) + req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); + else + req->Flags = 0; + + iov[0].iov_base = (char *)req; + /* 4 for rfc1002 length field */ + iov[0].iov_len = get_rfc1002_length(req) + 4; + + if (indatalen) + inc_rfc1001_len(req, indatalen); + + rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0); + rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base; + + if (rc != 0) { + if (tcon) + cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); + goto ioctl_exit; + } + + /* check if caller wants to look at return data or just return rc */ + if ((plen == NULL) || (out_data == NULL)) + goto ioctl_exit; + + *plen = le32_to_cpu(rsp->OutputCount); + + /* We check for obvious errors in the output buffer length and offset */ + if (*plen == 0) + goto ioctl_exit; /* server returned no data */ + else if (*plen > 0xFF00) { + cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen); + *plen = 0; + rc = -EIO; + goto ioctl_exit; + } + + if (get_rfc1002_length(rsp) < le32_to_cpu(rsp->OutputOffset) + *plen) { + cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen, + le32_to_cpu(rsp->OutputOffset)); + *plen = 0; + rc = -EIO; + goto ioctl_exit; + } + + *out_data = kmalloc(*plen, GFP_KERNEL); + if (*out_data == NULL) { + rc = -ENOMEM; + goto ioctl_exit; + } + + memcpy(*out_data, rsp->hdr.ProtocolId + le32_to_cpu(rsp->OutputOffset), + *plen); +ioctl_exit: + free_rsp_buf(resp_buftype, rsp); + return rc; +} + int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid) @@ -1384,8 +1473,7 @@ smb2_readv_callback(struct mid_q_entry *mid) case MID_RESPONSE_RECEIVED: credits_received = le16_to_cpu(buf->CreditRequest); /* result already set, check signature */ - if (server->sec_mode & - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { + if (server->sign) { int rc; rc = 
smb2_verify_signature(&rqst, server); diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 4cb4ced258cb..f31043b26bd3 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -1,7 +1,7 @@ /* * fs/cifs/smb2pdu.h * - * Copyright (c) International Business Machines Corp., 2009, 2010 + * Copyright (c) International Business Machines Corp., 2009, 2013 * Etersoft, 2012 * Author(s): Steve French (sfrench@us.ibm.com) * Pavel Shilovsky (pshilovsky@samba.org) 2012 @@ -170,6 +170,7 @@ struct smb2_negotiate_req { #define SMB20_PROT_ID 0x0202 #define SMB21_PROT_ID 0x0210 #define SMB30_PROT_ID 0x0300 +#define SMB302_PROT_ID 0x0302 #define BAD_PROT_ID 0xFFFF /* SecurityMode flags */ @@ -283,10 +284,17 @@ struct smb2_tree_connect_rsp { #define SHI1005_FLAGS_ALLOW_NAMESPACE_CACHING 0x00000400 #define SHI1005_FLAGS_ACCESS_BASED_DIRECTORY_ENUM 0x00000800 #define SHI1005_FLAGS_FORCE_LEVELII_OPLOCK 0x00001000 -#define SHI1005_FLAGS_ENABLE_HASH 0x00002000 +#define SHI1005_FLAGS_ENABLE_HASH_V1 0x00002000 +#define SHI1005_FLAGS_ENABLE_HASH_V2 0x00004000 +#define SHI1005_FLAGS_ENCRYPT_DATA 0x00008000 +#define SHI1005_FLAGS_ALL 0x0000FF33 /* Possible share capabilities */ -#define SMB2_SHARE_CAP_DFS cpu_to_le32(0x00000008) +#define SMB2_SHARE_CAP_DFS cpu_to_le32(0x00000008) /* all dialects */ +#define SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY cpu_to_le32(0x00000010) /* 3.0 */ +#define SMB2_SHARE_CAP_SCALEOUT cpu_to_le32(0x00000020) /* 3.0 */ +#define SMB2_SHARE_CAP_CLUSTER cpu_to_le32(0x00000040) /* 3.0 */ +#define SMB2_SHARE_CAP_ASYMMETRIC cpu_to_le32(0x00000080) /* 3.02 */ struct smb2_tree_disconnect_req { struct smb2_hdr hdr; @@ -477,6 +485,75 @@ struct create_lease { struct lease_context lcontext; } __packed; +/* this goes in the ioctl buffer when doing a copychunk request */ +struct copychunk_ioctl { + char SourceKey[24]; + __le32 ChunkCount; /* we are only sending 1 */ + __le32 Reserved; + /* array will only be one chunk long for us */ + __le64 SourceOffset; + __le64 TargetOffset; + __le32 Length; /* how many bytes to copy */ + __u32 Reserved2; +} __packed; + +/* Response and Request are the same format */ +struct validate_negotiate_info { + __le32 Capabilities; + __u8 Guid[SMB2_CLIENT_GUID_SIZE]; + __le16 SecurityMode; + __le16 DialectCount; + __le16 Dialect[1]; +} __packed; + +#define RSS_CAPABLE 0x00000001 +#define RDMA_CAPABLE 0x00000002 + +struct network_interface_info_ioctl_rsp { + __le32 Next; /* next interface. 
zero if this is last one */ + __le32 IfIndex; + __le32 Capability; /* RSS or RDMA Capable */ + __le32 Reserved; + __le64 LinkSpeed; + char SockAddr_Storage[128]; +} __packed; + +#define NO_FILE_ID 0xFFFFFFFFFFFFFFFFULL /* general ioctls to srv not to file */ + +struct smb2_ioctl_req { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 57 */ + __u16 Reserved; + __le32 CtlCode; + __u64 PersistentFileId; /* opaque endianness */ + __u64 VolatileFileId; /* opaque endianness */ + __le32 InputOffset; + __le32 InputCount; + __le32 MaxInputResponse; + __le32 OutputOffset; + __le32 OutputCount; + __le32 MaxOutputResponse; + __le32 Flags; + __u32 Reserved2; + char Buffer[0]; +} __packed; + +struct smb2_ioctl_rsp { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 57 */ + __u16 Reserved; + __le32 CtlCode; + __u64 PersistentFileId; /* opaque endianness */ + __u64 VolatileFileId; /* opaque endianness */ + __le32 InputOffset; + __le32 InputCount; + __le32 OutputOffset; + __le32 OutputCount; + __le32 Flags; + __u32 Reserved2; + /* char * buffer[] */ +} __packed; + /* Currently defined values for close flags */ #define SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB cpu_to_le16(0x0001) struct smb2_close_req { @@ -517,17 +594,25 @@ struct smb2_flush_rsp { __le16 Reserved; } __packed; +/* For read request Flags field below, following flag is defined for SMB3.02 */ +#define SMB2_READFLAG_READ_UNBUFFERED 0x01 + +/* Channel field for read and write: exactly one of following flags can be set*/ +#define SMB2_CHANNEL_NONE 0x00000000 +#define SMB2_CHANNEL_RDMA_V1 0x00000001 /* SMB3 or later */ +#define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000001 /* SMB3.02 or later */ + struct smb2_read_req { struct smb2_hdr hdr; __le16 StructureSize; /* Must be 49 */ __u8 Padding; /* offset from start of SMB2 header to place read */ - __u8 Reserved; + __u8 Flags; /* MBZ unless SMB3.02 or later */ __le32 Length; __le64 Offset; __u64 PersistentFileId; /* opaque endianness */ __u64 VolatileFileId; /* opaque endianness */ __le32 MinimumCount; - __le32 Channel; /* Reserved MBZ */ + __le32 Channel; /* MBZ except for SMB3 or later */ __le32 RemainingBytes; __le16 ReadChannelInfoOffset; /* Reserved MBZ */ __le16 ReadChannelInfoLength; /* Reserved MBZ */ @@ -545,8 +630,9 @@ struct smb2_read_rsp { __u8 Buffer[1]; } __packed; -/* For write request Flags field below the following flag is defined: */ -#define SMB2_WRITEFLAG_WRITE_THROUGH 0x00000001 +/* For write request Flags field below the following flags are defined: */ +#define SMB2_WRITEFLAG_WRITE_THROUGH 0x00000001 /* SMB2.1 or later */ +#define SMB2_WRITEFLAG_WRITE_UNBUFFERED 0x00000002 /* SMB3.02 or later */ struct smb2_write_req { struct smb2_hdr hdr; diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 2aa3535e38ce..d4e1eb807457 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -111,6 +111,10 @@ extern int SMB2_open(const unsigned int xid, struct cifs_tcon *tcon, __u32 desired_access, __u32 create_disposition, __u32 file_attributes, __u32 create_options, __u8 *oplock, struct smb2_file_all_info *buf); +extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, + u64 persistent_fid, u64 volatile_fid, u32 opcode, + bool is_fsctl, char *in_data, u32 indatalen, + char **out_data, u32 *plen /* returned data len */); extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_file_id, u64 volatile_file_id); extern int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, diff --git a/fs/cifs/smb2transport.c 
b/fs/cifs/smb2transport.c index 01f0ac800780..09b4fbaadeb6 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c @@ -116,11 +116,155 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) return rc; } +void +generate_smb3signingkey(struct TCP_Server_Info *server) +{ + unsigned char zero = 0x0; + __u8 i[4] = {0, 0, 0, 1}; + __u8 L[4] = {0, 0, 0, 128}; + int rc = 0; + unsigned char prfhash[SMB2_HMACSHA256_SIZE]; + unsigned char *hashptr = prfhash; + + memset(prfhash, 0x0, SMB2_HMACSHA256_SIZE); + memset(server->smb3signingkey, 0x0, SMB3_SIGNKEY_SIZE); + + rc = crypto_shash_setkey(server->secmech.hmacsha256, + server->session_key.response, SMB2_NTLMV2_SESSKEY_SIZE); + if (rc) { + cifs_dbg(VFS, "%s: Could not set with session key\n", __func__); + goto smb3signkey_ret; + } + + rc = crypto_shash_init(&server->secmech.sdeschmacsha256->shash); + if (rc) { + cifs_dbg(VFS, "%s: Could not init sign hmac\n", __func__); + goto smb3signkey_ret; + } + + rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash, + i, 4); + if (rc) { + cifs_dbg(VFS, "%s: Could not update with n\n", __func__); + goto smb3signkey_ret; + } + + rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash, + "SMB2AESCMAC", 12); + if (rc) { + cifs_dbg(VFS, "%s: Could not update with label\n", __func__); + goto smb3signkey_ret; + } + + rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash, + &zero, 1); + if (rc) { + cifs_dbg(VFS, "%s: Could not update with zero\n", __func__); + goto smb3signkey_ret; + } + + rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash, + "SmbSign", 8); + if (rc) { + cifs_dbg(VFS, "%s: Could not update with context\n", __func__); + goto smb3signkey_ret; + } + + rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash, + L, 4); + if (rc) { + cifs_dbg(VFS, "%s: Could not update with L\n", __func__); + goto smb3signkey_ret; + } + + rc = crypto_shash_final(&server->secmech.sdeschmacsha256->shash, + hashptr); + if (rc) { + cifs_dbg(VFS, "%s: Could not generate sha256 hash\n", __func__); + goto smb3signkey_ret; + } + + memcpy(server->smb3signingkey, hashptr, SMB3_SIGNKEY_SIZE); + +smb3signkey_ret: + return; +} + int smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) { - cifs_dbg(FYI, "smb3 signatures not supported yet\n"); - return -EOPNOTSUPP; + int i, rc; + unsigned char smb3_signature[SMB2_CMACAES_SIZE]; + unsigned char *sigptr = smb3_signature; + struct kvec *iov = rqst->rq_iov; + int n_vec = rqst->rq_nvec; + struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base; + + memset(smb3_signature, 0x0, SMB2_CMACAES_SIZE); + memset(smb2_pdu->Signature, 0x0, SMB2_SIGNATURE_SIZE); + + rc = crypto_shash_setkey(server->secmech.cmacaes, + server->smb3signingkey, SMB2_CMACAES_SIZE); + if (rc) { + cifs_dbg(VFS, "%s: Could not set key for cmac aes\n", __func__); + return rc; + } + + rc = crypto_shash_init(&server->secmech.sdesccmacaes->shash); + if (rc) { + cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__); + return rc; + } + + for (i = 0; i < n_vec; i++) { + if (iov[i].iov_len == 0) + continue; + if (iov[i].iov_base == NULL) { + cifs_dbg(VFS, "null iovec entry"); + return -EIO; + } + /* + * The first entry includes a length field (which does not get + * signed that occupies the first 4 bytes before the header). 
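+ * (Hence the iov_base + 4 / iov_len - 4 below: the signature is
+ * computed from the SMB2 ProtocolId onward, and the RFC1001
+ * length prefix is never covered by it.)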
+ */ + if (i == 0) { + if (iov[0].iov_len <= 8) /* cmd field at offset 9 */ + break; /* nothing to sign or corrupt header */ + rc = + crypto_shash_update( + &server->secmech.sdesccmacaes->shash, + iov[i].iov_base + 4, iov[i].iov_len - 4); + } else { + rc = + crypto_shash_update( + &server->secmech.sdesccmacaes->shash, + iov[i].iov_base, iov[i].iov_len); + } + if (rc) { + cifs_dbg(VFS, "%s: Couldn't update cmac aes with payload\n", + __func__); + return rc; + } + } + + /* now hash over the rq_pages array */ + for (i = 0; i < rqst->rq_npages; i++) { + struct kvec p_iov; + + cifs_rqst_page_to_kvec(rqst, i, &p_iov); + crypto_shash_update(&server->secmech.sdesccmacaes->shash, + p_iov.iov_base, p_iov.iov_len); + kunmap(rqst->rq_pages[i]); + } + + rc = crypto_shash_final(&server->secmech.sdesccmacaes->shash, + sigptr); + if (rc) + cifs_dbg(VFS, "%s: Could not generate cmac aes\n", __func__); + + memcpy(smb2_pdu->Signature, sigptr, SMB2_SIGNATURE_SIZE); + + return rc; } /* must be called with server->srv_mutex held */ @@ -275,8 +419,7 @@ smb2_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, dump_smb(mid->resp_buf, min_t(u32, 80, len)); /* convert the length into a more usable form */ - if ((len > 24) && - (server->sec_mode & (SECMODE_SIGN_REQUIRED|SECMODE_SIGN_ENABLED))) { + if (len > 24 && server->sign) { int rc; rc = smb2_verify_signature(&rqst, server); diff --git a/fs/cifs/smbfsctl.h b/fs/cifs/smbfsctl.h index 7056b891e087..d952ee48f4dc 100644 --- a/fs/cifs/smbfsctl.h +++ b/fs/cifs/smbfsctl.h @@ -1,7 +1,7 @@ /* * fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions * - * Copyright (c) International Business Machines Corp., 2002,2009 + * Copyright (c) International Business Machines Corp., 2002,2013 * Author(s): Steve French (sfrench@us.ibm.com) * * This library is free software; you can redistribute it and/or modify @@ -22,7 +22,7 @@ /* IOCTL information */ /* * List of ioctl/fsctl function codes that are or could be useful in the - * future to remote clients like cifs or SMB2 client. There is probably + * future to remote clients like cifs or SMB2/SMB3 client. This is probably * a slightly larger set of fsctls that NTFS local filesystem could handle, * including the seven below that we do not have struct definitions for. * Even with protocol definitions for most of these now available, we still @@ -30,7 +30,13 @@ * remotely. Some of the following, such as the encryption/compression ones * could be invoked from tools via a specialized hook into the VFS rather * than via the standard vfs entry points + * + * See MS-SMB2 Section 2.2.31 (last checked June 2013, all of that list are + * below). Additional detail on less common ones can be found in MS-FSCC + * section 2.3. 
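+ *
+ * The values follow the Windows CTL_CODE packing: the high 16 bits
+ * give the device type (0x0009 FILE_SYSTEM, 0x0011 NAMED_PIPE,
+ * 0x0014 NETWORK_FILE_SYSTEM), the low bits the function number,
+ * required access and transfer method.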
*/ +#define FSCTL_DFS_GET_REFERRALS 0x00060194 +#define FSCTL_DFS_GET_REFERRALS_EX 0x000601B0 #define FSCTL_REQUEST_OPLOCK_LEVEL_1 0x00090000 #define FSCTL_REQUEST_OPLOCK_LEVEL_2 0x00090004 #define FSCTL_REQUEST_BATCH_OPLOCK 0x00090008 @@ -71,14 +77,31 @@ #define FSCTL_SET_SHORT_NAME_BEHAVIOR 0x000901B4 /* BB add struct */ #define FSCTL_QUERY_ALLOCATED_RANGES 0x000940CF /* BB add struct */ #define FSCTL_SET_DEFECT_MANAGEMENT 0x00098134 /* BB add struct */ +#define FSCTL_FILE_LEVEL_TRIM 0x00098208 /* BB add struct */ #define FSCTL_SIS_LINK_FILES 0x0009C104 #define FSCTL_PIPE_PEEK 0x0011400C /* BB add struct */ #define FSCTL_PIPE_TRANSCEIVE 0x0011C017 /* BB add struct */ /* strange that the number for this op is not sequential with previous op */ #define FSCTL_PIPE_WAIT 0x00110018 /* BB add struct */ +/* Enumerate previous versions of a file */ +#define FSCTL_SRV_ENUMERATE_SNAPSHOTS 0x00144064 +/* Retrieve an opaque file reference for server-side data movement ie copy */ +#define FSCTL_SRV_REQUEST_RESUME_KEY 0x00140078 +#define FSCTL_LMR_REQUEST_RESILIENCY 0x001401D4 /* BB add struct */ #define FSCTL_LMR_GET_LINK_TRACK_INF 0x001400E8 /* BB add struct */ #define FSCTL_LMR_SET_LINK_TRACK_INF 0x001400EC /* BB add struct */ +#define FSCTL_VALIDATE_NEGOTIATE_INFO 0x00140204 /* BB add struct */ +/* Perform server-side data movement */ +#define FSCTL_SRV_COPYCHUNK 0x001440F2 +#define FSCTL_SRV_COPYCHUNK_WRITE 0x001480F2 +#define FSCTL_QUERY_NETWORK_INTERFACE_INFO 0x001401FC /* BB add struct */ +#define FSCTL_SRV_READ_HASH 0x001441BB /* BB add struct */ #define IO_REPARSE_TAG_MOUNT_POINT 0xA0000003 #define IO_REPARSE_TAG_HSM 0xC0000004 #define IO_REPARSE_TAG_SIS 0x80000007 + +/* fsctl flags */ +/* If Flags is set to this value, the request is an FSCTL not ioctl request */ +#define SMB2_0_IOCTL_IS_FSCTL 0x00000001 + diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index bfbf4700d160..6fdcb1b4a106 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -447,7 +447,7 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ) { int error; - error = wait_event_freezekillable(server->response_q, + error = wait_event_freezekillable_unsafe(server->response_q, midQ->mid_state != MID_REQUEST_SUBMITTED); if (error < 0) return -ERESTARTSYS; @@ -463,7 +463,7 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) struct mid_q_entry *mid; /* enable signing if server requires it */ - if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) + if (server->sign) hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; mid = AllocMidQEntry(hdr, server); @@ -612,7 +612,7 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, dump_smb(mid->resp_buf, min_t(u32, 92, len)); /* convert the length into a more usable form */ - if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { + if (server->sign) { struct kvec iov; int rc = 0; struct smb_rqst rqst = { .rq_iov = &iov, diff --git a/fs/coda/dir.c b/fs/coda/dir.c index b7d3a05c062c..14a14808320c 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -43,15 +43,14 @@ static int coda_rename(struct inode *old_inode, struct dentry *old_dentry, struct inode *new_inode, struct dentry *new_dentry); /* dir file-ops */ -static int coda_readdir(struct file *file, void *buf, filldir_t filldir); +static int coda_readdir(struct file *file, struct dir_context *ctx); /* dentry ops */ static int coda_dentry_revalidate(struct dentry *de, unsigned int flags); static int coda_dentry_delete(const 
struct dentry *); /* support routines */ -static int coda_venus_readdir(struct file *coda_file, void *buf, - filldir_t filldir); +static int coda_venus_readdir(struct file *, struct dir_context *); /* same as fs/bad_inode.c */ static int coda_return_EIO(void) @@ -85,7 +84,7 @@ const struct inode_operations coda_dir_inode_operations = const struct file_operations coda_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = coda_readdir, + .iterate = coda_readdir, .open = coda_open, .release = coda_release, .fsync = coda_fsync, @@ -378,7 +377,7 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry, /* file operations for directories */ -static int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir) +static int coda_readdir(struct file *coda_file, struct dir_context *ctx) { struct coda_file_info *cfi; struct file *host_file; @@ -391,30 +390,19 @@ static int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir) if (!host_file->f_op) return -ENOTDIR; - if (host_file->f_op->readdir) - { - /* potemkin case: we were handed a directory inode. - * We can't use vfs_readdir because we have to keep the file - * position in sync between the coda_file and the host_file. - * and as such we need grab the inode mutex. */ + if (host_file->f_op->iterate) { struct inode *host_inode = file_inode(host_file); - mutex_lock(&host_inode->i_mutex); - host_file->f_pos = coda_file->f_pos; - ret = -ENOENT; if (!IS_DEADDIR(host_inode)) { - ret = host_file->f_op->readdir(host_file, buf, filldir); + ret = host_file->f_op->iterate(host_file, ctx); file_accessed(host_file); } - - coda_file->f_pos = host_file->f_pos; mutex_unlock(&host_inode->i_mutex); + return ret; } - else /* Venus: we must read Venus dirents from a file */ - ret = coda_venus_readdir(coda_file, buf, filldir); - - return ret; + /* Venus: we must read Venus dirents from a file */ + return coda_venus_readdir(coda_file, ctx); } static inline unsigned int CDT2DT(unsigned char cdt) @@ -437,10 +425,8 @@ static inline unsigned int CDT2DT(unsigned char cdt) } /* support routines */ -static int coda_venus_readdir(struct file *coda_file, void *buf, - filldir_t filldir) +static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx) { - int result = 0; /* # of entries returned */ struct coda_file_info *cfi; struct coda_inode_info *cii; struct file *host_file; @@ -462,23 +448,12 @@ static int coda_venus_readdir(struct file *coda_file, void *buf, vdir = kmalloc(sizeof(*vdir), GFP_KERNEL); if (!vdir) return -ENOMEM; - if (coda_file->f_pos == 0) { - ret = filldir(buf, ".", 1, 0, de->d_inode->i_ino, DT_DIR); - if (ret < 0) - goto out; - result++; - coda_file->f_pos++; - } - if (coda_file->f_pos == 1) { - ret = filldir(buf, "..", 2, 1, parent_ino(de), DT_DIR); - if (ret < 0) - goto out; - result++; - coda_file->f_pos++; - } + if (!dir_emit_dots(coda_file, ctx)) + goto out; + while (1) { /* read entries from the directory file */ - ret = kernel_read(host_file, coda_file->f_pos - 2, (char *)vdir, + ret = kernel_read(host_file, ctx->pos - 2, (char *)vdir, sizeof(*vdir)); if (ret < 0) { printk(KERN_ERR "coda readdir: read dir %s failed %d\n", @@ -507,32 +482,23 @@ static int coda_venus_readdir(struct file *coda_file, void *buf, /* Make sure we skip '.' and '..', we already got those */ if (name.name[0] == '.' && (name.len == 1 || - (vdir->d_name[1] == '.' && name.len == 2))) + (name.name[1] == '.' 
&& name.len == 2))) vdir->d_fileno = name.len = 0; /* skip null entries */ if (vdir->d_fileno && name.len) { - /* try to look up this entry in the dcache, that way - * userspace doesn't have to worry about breaking - * getcwd by having mismatched inode numbers for - * internal volume mountpoints. */ - ino = find_inode_number(de, &name); - if (!ino) ino = vdir->d_fileno; - + ino = vdir->d_fileno; type = CDT2DT(vdir->d_type); - ret = filldir(buf, name.name, name.len, - coda_file->f_pos, ino, type); - /* failure means no space for filling in this round */ - if (ret < 0) break; - result++; + if (!dir_emit(ctx, name.name, name.len, ino, type)) + break; } /* we'll always have progress because d_reclen is unsigned and * we've already established it is non-zero. */ - coda_file->f_pos += vdir->d_reclen; + ctx->pos += vdir->d_reclen; } out: kfree(vdir); - return result ? result : ret; + return 0; } /* called when a cache lookup succeeds */ diff --git a/fs/compat.c b/fs/compat.c index fc3b55dce184..6af20de2c1a3 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -832,6 +832,7 @@ struct compat_old_linux_dirent { }; struct compat_readdir_callback { + struct dir_context ctx; struct compat_old_linux_dirent __user *dirent; int result; }; @@ -873,15 +874,15 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd, { int error; struct fd f = fdget(fd); - struct compat_readdir_callback buf; + struct compat_readdir_callback buf = { + .ctx.actor = compat_fillonedir, + .dirent = dirent + }; if (!f.file) return -EBADF; - buf.result = 0; - buf.dirent = dirent; - - error = vfs_readdir(f.file, compat_fillonedir, &buf); + error = iterate_dir(f.file, &buf.ctx); if (buf.result) error = buf.result; @@ -897,6 +898,7 @@ struct compat_linux_dirent { }; struct compat_getdents_callback { + struct dir_context ctx; struct compat_linux_dirent __user *current_dir; struct compat_linux_dirent __user *previous; int count; @@ -951,7 +953,11 @@ asmlinkage long compat_sys_getdents(unsigned int fd, { struct fd f; struct compat_linux_dirent __user * lastdirent; - struct compat_getdents_callback buf; + struct compat_getdents_callback buf = { + .ctx.actor = compat_filldir, + .current_dir = dirent, + .count = count + }; int error; if (!access_ok(VERIFY_WRITE, dirent, count)) @@ -961,17 +967,12 @@ asmlinkage long compat_sys_getdents(unsigned int fd, if (!f.file) return -EBADF; - buf.current_dir = dirent; - buf.previous = NULL; - buf.count = count; - buf.error = 0; - - error = vfs_readdir(f.file, compat_filldir, &buf); + error = iterate_dir(f.file, &buf.ctx); if (error >= 0) error = buf.error; lastdirent = buf.previous; if (lastdirent) { - if (put_user(f.file->f_pos, &lastdirent->d_off)) + if (put_user(buf.ctx.pos, &lastdirent->d_off)) error = -EFAULT; else error = count - buf.count; @@ -983,6 +984,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd, #ifndef __ARCH_OMIT_COMPAT_SYS_GETDENTS64 struct compat_getdents_callback64 { + struct dir_context ctx; struct linux_dirent64 __user *current_dir; struct linux_dirent64 __user *previous; int count; @@ -1036,7 +1038,11 @@ asmlinkage long compat_sys_getdents64(unsigned int fd, { struct fd f; struct linux_dirent64 __user * lastdirent; - struct compat_getdents_callback64 buf; + struct compat_getdents_callback64 buf = { + .ctx.actor = compat_filldir64, + .current_dir = dirent, + .count = count + }; int error; if (!access_ok(VERIFY_WRITE, dirent, count)) @@ -1046,17 +1052,12 @@ asmlinkage long compat_sys_getdents64(unsigned int fd, if (!f.file) return -EBADF; - buf.current_dir = dirent; - 
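The compat getdents conversions above all follow the same recipe: the per-call cookie now embeds a struct dir_context as its first member, the actor goes in .ctx.actor, and vfs_readdir(file, actor, cookie) becomes iterate_dir(file, &cookie.ctx). Because the context is the first member, the actor can recover its cookie with a plain cast. A minimal sketch of that shape (my_* names are hypothetical, kernel-internal code):

	struct my_count_callback {
		struct dir_context ctx;	/* must be first: the VFS passes &ctx to the actor */
		int entries;
	};

	static int my_count_actor(void *__buf, const char *name, int namelen,
				  loff_t offset, u64 ino, unsigned int d_type)
	{
		struct my_count_callback *buf = __buf;	/* ctx is first, so this cast is safe */

		buf->entries++;
		return 0;			/* non-zero stops iterate_dir() */
	}

	static int my_count_entries(struct file *dir)
	{
		struct my_count_callback buf = { .ctx.actor = my_count_actor };
		int err = iterate_dir(dir, &buf.ctx);

		return err < 0 ? err : buf.entries;
	}
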
buf.previous = NULL; - buf.count = count; - buf.error = 0; - - error = vfs_readdir(f.file, compat_filldir64, &buf); + error = iterate_dir(f.file, &buf.ctx); if (error >= 0) error = buf.error; lastdirent = buf.previous; if (lastdirent) { - typeof(lastdirent->d_off) d_off = f.file->f_pos; + typeof(lastdirent->d_off) d_off = buf.ctx.pos; if (__put_user_unaligned(d_off, &lastdirent->d_off)) error = -EFAULT; else diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 996cdc5abb85..5d19acfa7c6c 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -66,7 +66,6 @@ #include <linux/gigaset_dev.h> #ifdef CONFIG_BLOCK -#include <linux/loop.h> #include <linux/cdrom.h> #include <linux/fd.h> #include <scsi/scsi.h> @@ -954,8 +953,6 @@ COMPATIBLE_IOCTL(MTIOCTOP) /* Socket level stuff */ COMPATIBLE_IOCTL(FIOQSIZE) #ifdef CONFIG_BLOCK -/* loop */ -IGNORE_IOCTL(LOOP_CLR_FD) /* md calls this on random blockdevs */ IGNORE_IOCTL(RAID_VERSION) /* qemu/qemu-img might call these two on plain files for probing */ diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 7aabc6ad4e9b..64e5323cbbb0 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -1532,84 +1532,66 @@ static inline unsigned char dt_type(struct configfs_dirent *sd) return (sd->s_mode >> 12) & 15; } -static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir) +static int configfs_readdir(struct file *file, struct dir_context *ctx) { - struct dentry *dentry = filp->f_path.dentry; + struct dentry *dentry = file->f_path.dentry; struct super_block *sb = dentry->d_sb; struct configfs_dirent * parent_sd = dentry->d_fsdata; - struct configfs_dirent *cursor = filp->private_data; + struct configfs_dirent *cursor = file->private_data; struct list_head *p, *q = &cursor->s_sibling; ino_t ino = 0; - int i = filp->f_pos; - switch (i) { - case 0: - ino = dentry->d_inode->i_ino; - if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) - break; - filp->f_pos++; - i++; - /* fallthrough */ - case 1: - ino = parent_ino(dentry); - if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0) - break; - filp->f_pos++; - i++; - /* fallthrough */ - default: - if (filp->f_pos == 2) { - spin_lock(&configfs_dirent_lock); - list_move(q, &parent_sd->s_children); - spin_unlock(&configfs_dirent_lock); - } - for (p=q->next; p!= &parent_sd->s_children; p=p->next) { - struct configfs_dirent *next; - const char * name; - int len; - struct inode *inode = NULL; + if (!dir_emit_dots(file, ctx)) + return 0; + if (ctx->pos == 2) { + spin_lock(&configfs_dirent_lock); + list_move(q, &parent_sd->s_children); + spin_unlock(&configfs_dirent_lock); + } + for (p = q->next; p != &parent_sd->s_children; p = p->next) { + struct configfs_dirent *next; + const char *name; + int len; + struct inode *inode = NULL; + + next = list_entry(p, struct configfs_dirent, s_sibling); + if (!next->s_element) + continue; - next = list_entry(p, struct configfs_dirent, - s_sibling); - if (!next->s_element) - continue; - - name = configfs_get_name(next); - len = strlen(name); - - /* - * We'll have a dentry and an inode for - * PINNED items and for open attribute - * files. We lock here to prevent a race - * with configfs_d_iput() clearing - * s_dentry before calling iput(). - * - * Why do we go to the trouble? If - * someone has an attribute file open, - * the inode number should match until - * they close it. Beyond that, we don't - * care. 
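The configfs conversion just above shows the other recurring shape in this series: a filesystem's ->iterate() opens with dir_emit_dots() to produce "." and "..", then calls dir_emit() per entry, advancing ctx->pos itself and returning 0 when dir_emit() reports the caller's buffer is full. Reduced to a skeleton (myfs_* names are hypothetical):

	static int myfs_readdir(struct file *file, struct dir_context *ctx)
	{
		struct myfs_entry *e;

		if (!dir_emit_dots(file, ctx))	/* handles ctx->pos 0 and 1 */
			return 0;

		/* resume from wherever the previous getdents call stopped */
		for (e = myfs_first_entry(file, ctx->pos - 2); e; e = e->next) {
			if (!dir_emit(ctx, e->name, strlen(e->name),
				      e->ino, e->d_type))
				return 0;	/* buffer full; pos marks the resume point */
			ctx->pos++;
		}
		return 0;
	}
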
- */ - spin_lock(&configfs_dirent_lock); - dentry = next->s_dentry; - if (dentry) - inode = dentry->d_inode; - if (inode) - ino = inode->i_ino; - spin_unlock(&configfs_dirent_lock); - if (!inode) - ino = iunique(sb, 2); + name = configfs_get_name(next); + len = strlen(name); + + /* + * We'll have a dentry and an inode for + * PINNED items and for open attribute + * files. We lock here to prevent a race + * with configfs_d_iput() clearing + * s_dentry before calling iput(). + * + * Why do we go to the trouble? If + * someone has an attribute file open, + * the inode number should match until + * they close it. Beyond that, we don't + * care. + */ + spin_lock(&configfs_dirent_lock); + dentry = next->s_dentry; + if (dentry) + inode = dentry->d_inode; + if (inode) + ino = inode->i_ino; + spin_unlock(&configfs_dirent_lock); + if (!inode) + ino = iunique(sb, 2); - if (filldir(dirent, name, len, filp->f_pos, ino, - dt_type(next)) < 0) - return 0; + if (!dir_emit(ctx, name, len, ino, dt_type(next))) + return 0; - spin_lock(&configfs_dirent_lock); - list_move(q, p); - spin_unlock(&configfs_dirent_lock); - p = q; - filp->f_pos++; - } + spin_lock(&configfs_dirent_lock); + list_move(q, p); + spin_unlock(&configfs_dirent_lock); + p = q; + ctx->pos++; } return 0; } @@ -1661,7 +1643,7 @@ const struct file_operations configfs_dir_operations = { .release = configfs_dir_close, .llseek = configfs_dir_lseek, .read = generic_read_dir, - .readdir = configfs_readdir, + .iterate = configfs_readdir, }; int configfs_register_subsystem(struct configfs_subsystem *subsys) diff --git a/fs/configfs/file.c b/fs/configfs/file.c index 2b6cb23dd14e..1d1c41f1014d 100644 --- a/fs/configfs/file.c +++ b/fs/configfs/file.c @@ -203,7 +203,7 @@ configfs_write_file(struct file *file, const char __user *buf, size_t count, lof mutex_lock(&buffer->mutex); len = fill_write_buffer(buffer, buf, count); if (len > 0) - len = flush_write_buffer(file->f_path.dentry, buffer, count); + len = flush_write_buffer(file->f_path.dentry, buffer, len); if (len > 0) *ppos += len; mutex_unlock(&buffer->mutex); diff --git a/fs/coredump.c b/fs/coredump.c index dafafbafa731..72f816d6cad9 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -45,69 +45,79 @@ #include <trace/events/sched.h> int core_uses_pid; -char core_pattern[CORENAME_MAX_SIZE] = "core"; unsigned int core_pipe_limit; +char core_pattern[CORENAME_MAX_SIZE] = "core"; +static int core_name_size = CORENAME_MAX_SIZE; struct core_name { char *corename; int used, size; }; -static atomic_t call_count = ATOMIC_INIT(1); /* The maximal length of core_pattern is also specified in sysctl.c */ -static int expand_corename(struct core_name *cn) +static int expand_corename(struct core_name *cn, int size) { - char *old_corename = cn->corename; - - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count); - cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL); + char *corename = krealloc(cn->corename, size, GFP_KERNEL); - if (!cn->corename) { - kfree(old_corename); + if (!corename) return -ENOMEM; - } + if (size > core_name_size) /* racy but harmless */ + core_name_size = size; + + cn->size = ksize(corename); + cn->corename = corename; return 0; } +static int cn_vprintf(struct core_name *cn, const char *fmt, va_list arg) +{ + int free, need; + +again: + free = cn->size - cn->used; + need = vsnprintf(cn->corename + cn->used, free, fmt, arg); + if (need < free) { + cn->used += need; + return 0; + } + + if (!expand_corename(cn, cn->size + need - free + 1)) + goto again; + + return -ENOMEM; +} + static 
int cn_printf(struct core_name *cn, const char *fmt, ...) { - char *cur; - int need; - int ret; va_list arg; + int ret; va_start(arg, fmt); - need = vsnprintf(NULL, 0, fmt, arg); + ret = cn_vprintf(cn, fmt, arg); va_end(arg); - if (likely(need < cn->size - cn->used - 1)) - goto out_printf; + return ret; +} - ret = expand_corename(cn); - if (ret) - goto expand_fail; +static int cn_esc_printf(struct core_name *cn, const char *fmt, ...) +{ + int cur = cn->used; + va_list arg; + int ret; -out_printf: - cur = cn->corename + cn->used; va_start(arg, fmt); - vsnprintf(cur, need + 1, fmt, arg); + ret = cn_vprintf(cn, fmt, arg); va_end(arg); - cn->used += need; - return 0; -expand_fail: + for (; cur < cn->used; ++cur) { + if (cn->corename[cur] == '/') + cn->corename[cur] = '!'; + } return ret; } -static void cn_escape(char *str) -{ - for (; *str; str++) - if (*str == '/') - *str = '!'; -} - static int cn_print_exe_file(struct core_name *cn) { struct file *exe_file; @@ -115,12 +125,8 @@ static int cn_print_exe_file(struct core_name *cn) int ret; exe_file = get_mm_exe_file(current->mm); - if (!exe_file) { - char *commstart = cn->corename + cn->used; - ret = cn_printf(cn, "%s (path unknown)", current->comm); - cn_escape(commstart); - return ret; - } + if (!exe_file) + return cn_esc_printf(cn, "%s (path unknown)", current->comm); pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY); if (!pathbuf) { @@ -134,9 +140,7 @@ static int cn_print_exe_file(struct core_name *cn) goto free_buf; } - cn_escape(path); - - ret = cn_printf(cn, "%s", path); + ret = cn_esc_printf(cn, "%s", path); free_buf: kfree(pathbuf); @@ -157,19 +161,19 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm) int pid_in_pattern = 0; int err = 0; - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count); - cn->corename = kmalloc(cn->size, GFP_KERNEL); cn->used = 0; - - if (!cn->corename) + cn->corename = NULL; + if (expand_corename(cn, core_name_size)) return -ENOMEM; + cn->corename[0] = '\0'; + + if (ispipe) + ++pat_ptr; /* Repeat as long as we have more pattern to process and more output space */ while (*pat_ptr) { if (*pat_ptr != '%') { - if (*pat_ptr == 0) - goto out; err = cn_printf(cn, "%c", *pat_ptr++); } else { switch (*++pat_ptr) { @@ -210,22 +214,16 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm) break; } /* hostname */ - case 'h': { - char *namestart = cn->corename + cn->used; + case 'h': down_read(&uts_sem); - err = cn_printf(cn, "%s", + err = cn_esc_printf(cn, "%s", utsname()->nodename); up_read(&uts_sem); - cn_escape(namestart); break; - } /* executable */ - case 'e': { - char *commstart = cn->corename + cn->used; - err = cn_printf(cn, "%s", current->comm); - cn_escape(commstart); + case 'e': + err = cn_esc_printf(cn, "%s", current->comm); break; - } case 'E': err = cn_print_exe_file(cn); break; @@ -244,6 +242,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm) return err; } +out: /* Backward compatibility with core_uses_pid: * * If core_pattern does not include a %p (as is the default) @@ -254,7 +253,6 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm) if (err) return err; } -out: return ispipe; } @@ -549,7 +547,7 @@ void do_coredump(siginfo_t *siginfo) if (ispipe < 0) { printk(KERN_WARNING "format_corename failed\n"); printk(KERN_WARNING "Aborting core\n"); - goto fail_corename; + goto fail_unlock; } if (cprm.limit == 1) { @@ -584,7 +582,7 @@ void do_coredump(siginfo_t *siginfo) goto fail_dropcount; } - 
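cn_vprintf() above leans on C99 vsnprintf() semantics: given a too-small buffer, the return value is the full length the output would have had, so the caller can grow to exactly the needed size and retry. The same idiom in self-contained userspace C (illustrative, not the kernel code):

	#include <stdarg.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Append formatted text to a growable buffer, expanding on demand. */
	static int buf_printf(char **buf, size_t *used, size_t *size,
			      const char *fmt, ...)
	{
		va_list ap, ap2;
		int need;

		va_start(ap, fmt);
		va_copy(ap2, ap);		/* ap is consumed by the first try */
		need = vsnprintf(*buf + *used, *size - *used, fmt, ap);
		va_end(ap);
		if (need >= (int)(*size - *used)) {	/* truncated: grow and retry */
			char *p = realloc(*buf, *used + need + 1);
			if (!p) { va_end(ap2); return -1; }
			*buf = p;
			*size = *used + need + 1;
			vsnprintf(*buf + *used, *size - *used, fmt, ap2);
		}
		va_end(ap2);
		*used += need;
		return 0;
	}

	int main(void)
	{
		size_t used = 0, size = 8;
		char *buf = malloc(size);

		buf[0] = '\0';
		buf_printf(&buf, &used, &size, "core.%d.%s", 1234, "a-much-longer-comm");
		puts(buf);	/* full string despite the tiny initial buffer */
		free(buf);
		return 0;
	}
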
helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL); + helper_argv = argv_split(GFP_KERNEL, cn.corename, NULL); if (!helper_argv) { printk(KERN_WARNING "%s failed to allocate memory\n", __func__); @@ -601,7 +599,7 @@ void do_coredump(siginfo_t *siginfo) argv_free(helper_argv); if (retval) { - printk(KERN_INFO "Core dump to %s pipe failed\n", + printk(KERN_INFO "Core dump to |%s pipe failed\n", cn.corename); goto close_fail; } @@ -669,7 +667,6 @@ fail_dropcount: atomic_dec(&core_dump_count); fail_unlock: kfree(cn.corename); -fail_corename: coredump_finish(mm, core_dumped); revert_creds(old_cred); fail_creds: diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index 35b1c7bd18b7..e501ac3a49ff 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -349,18 +349,17 @@ static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf) /* * Read a cramfs directory entry. */ -static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int cramfs_readdir(struct file *file, struct dir_context *ctx) { - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; char *buf; unsigned int offset; - int copied; /* Offset within the thing. */ - offset = filp->f_pos; - if (offset >= inode->i_size) + if (ctx->pos >= inode->i_size) return 0; + offset = ctx->pos; /* Directory entries are always 4-byte aligned */ if (offset & 3) return -EINVAL; @@ -369,14 +368,13 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (!buf) return -ENOMEM; - copied = 0; while (offset < inode->i_size) { struct cramfs_inode *de; unsigned long nextoffset; char *name; ino_t ino; umode_t mode; - int namelen, error; + int namelen; mutex_lock(&read_mutex); de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN); @@ -402,13 +400,10 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir) break; namelen--; } - error = filldir(dirent, buf, namelen, offset, ino, mode >> 12); - if (error) + if (!dir_emit(ctx, buf, namelen, ino, mode >> 12)) break; - offset = nextoffset; - filp->f_pos = offset; - copied++; + ctx->pos = offset = nextoffset; } kfree(buf); return 0; @@ -547,7 +542,7 @@ static const struct address_space_operations cramfs_aops = { static const struct file_operations cramfs_directory_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = cramfs_readdir, + .iterate = cramfs_readdir, }; static const struct inode_operations cramfs_dir_inode_operations = { diff --git a/fs/dcache.c b/fs/dcache.c index f09b9085f7d8..87bdb5329c3c 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1612,6 +1612,10 @@ EXPORT_SYMBOL(d_obtain_alias); * If a dentry was found and moved, then it is returned. Otherwise NULL * is returned. This matches the expected return value of ->lookup. * + * Cluster filesystems may call this function with a negative, hashed dentry. + * In that case, we know that the inode will be a regular file, and also this + * will only occur during atomic_open. So we need to check for the dentry + * being already hashed only in the final case. 
*/ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) { @@ -1636,8 +1640,11 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) security_d_instantiate(dentry, inode); d_rehash(dentry); } - } else - d_add(dentry, inode); + } else { + d_instantiate(dentry, inode); + if (d_unhashed(dentry)) + d_rehash(dentry); + } return new; } EXPORT_SYMBOL(d_splice_alias); @@ -1723,7 +1730,7 @@ EXPORT_SYMBOL(d_add_ci); * Do the slow-case of the dentry name compare. * * Unlike the dentry_cmp() function, we need to atomically - * load the name, length and inode information, so that the + * load the name and length information, so that the * filesystem can rely on them, and can use the 'name' and * 'len' information without worrying about walking off the * end of memory etc. @@ -1741,22 +1748,18 @@ enum slow_d_compare { static noinline enum slow_d_compare slow_dentry_cmp( const struct dentry *parent, - struct inode *inode, struct dentry *dentry, unsigned int seq, const struct qstr *name) { int tlen = dentry->d_name.len; const char *tname = dentry->d_name.name; - struct inode *i = dentry->d_inode; if (read_seqcount_retry(&dentry->d_seq, seq)) { cpu_relax(); return D_COMP_SEQRETRY; } - if (parent->d_op->d_compare(parent, inode, - dentry, i, - tlen, tname, name)) + if (parent->d_op->d_compare(parent, dentry, tlen, tname, name)) return D_COMP_NOMATCH; return D_COMP_OK; } @@ -1766,7 +1769,6 @@ static noinline enum slow_d_compare slow_dentry_cmp( * @parent: parent dentry * @name: qstr of name we wish to find * @seqp: returns d_seq value at the point where the dentry was found - * @inode: returns dentry->d_inode when the inode was found valid. * Returns: dentry, or NULL * * __d_lookup_rcu is the dcache lookup function for rcu-walk name @@ -1793,7 +1795,7 @@ static noinline enum slow_d_compare slow_dentry_cmp( */ struct dentry *__d_lookup_rcu(const struct dentry *parent, const struct qstr *name, - unsigned *seqp, struct inode *inode) + unsigned *seqp) { u64 hashlen = name->hash_len; const unsigned char *str = name->name; @@ -1827,11 +1829,10 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent, seqretry: /* * The dentry sequence count protects us from concurrent - * renames, and thus protects inode, parent and name fields. + * renames, and thus protects parent and name fields. * * The caller must perform a seqcount check in order - * to do anything useful with the returned dentry, - * including using the 'd_inode' pointer. + * to do anything useful with the returned dentry. * * NOTE! We do a "raw" seqcount_begin here. 
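The d_seq handling in this hunk is the standard seqcount reader loop: sample the counter, read a consistent snapshot of the name and parent, and retry if a concurrent rename bumped the counter. In outline (generic pattern, not the exact dcache code):

	unsigned seq;

	do {
		seq = read_seqcount_begin(&dentry->d_seq);
		/* ... copy out dentry->d_name / dentry->d_parent here ... */
	} while (read_seqcount_retry(&dentry->d_seq, seq));

__d_lookup_rcu() deviates from this in two ways visible above: it uses raw_seqcount_begin(), which accepts an odd (write-in-progress) value instead of spinning, and it pushes the final retry decision out to the caller through *seqp.
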
That means that * we don't wait for the sequence count to stabilize if it @@ -1845,12 +1846,12 @@ seqretry: continue; if (d_unhashed(dentry)) continue; - *seqp = seq; if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) { if (dentry->d_name.hash != hashlen_hash(hashlen)) continue; - switch (slow_dentry_cmp(parent, inode, dentry, seq, name)) { + *seqp = seq; + switch (slow_dentry_cmp(parent, dentry, seq, name)) { case D_COMP_OK: return dentry; case D_COMP_NOMATCH: @@ -1862,6 +1863,7 @@ seqretry: if (dentry->d_name.hash_len != hashlen) continue; + *seqp = seq; if (!dentry_cmp(dentry, str, hashlen_len(hashlen))) return dentry; } @@ -1959,9 +1961,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name) if (parent->d_flags & DCACHE_OP_COMPARE) { int tlen = dentry->d_name.len; const char *tname = dentry->d_name.name; - if (parent->d_op->d_compare(parent, parent->d_inode, - dentry, dentry->d_inode, - tlen, tname, name)) + if (parent->d_op->d_compare(parent, dentry, tlen, tname, name)) goto next; } else { if (dentry->d_name.len != len) @@ -1998,7 +1998,7 @@ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name) */ name->hash = full_name_hash(name->name, name->len); if (dir->d_flags & DCACHE_OP_HASH) { - int err = dir->d_op->d_hash(dir, dir->d_inode, name); + int err = dir->d_op->d_hash(dir, name); if (unlikely(err < 0)) return ERR_PTR(err); } @@ -2968,34 +2968,21 @@ rename_retry: goto again; } -/** - * find_inode_number - check for dentry with name - * @dir: directory to check - * @name: Name to find. - * - * Check whether a dentry already exists for the given name, - * and return the inode number if it has an inode. Otherwise - * 0 is returned. - * - * This routine is used to post-process directory listings for - * filesystems using synthetic inode numbers, and is necessary - * to keep getcwd() working. 
- */ - -ino_t find_inode_number(struct dentry *dir, struct qstr *name) +void d_tmpfile(struct dentry *dentry, struct inode *inode) { - struct dentry * dentry; - ino_t ino = 0; - - dentry = d_hash_and_lookup(dir, name); - if (!IS_ERR_OR_NULL(dentry)) { - if (dentry->d_inode) - ino = dentry->d_inode->i_ino; - dput(dentry); - } - return ino; + inode_dec_link_count(inode); + BUG_ON(dentry->d_name.name != dentry->d_iname || + !hlist_unhashed(&dentry->d_alias) || + !d_unlinked(dentry)); + spin_lock(&dentry->d_parent->d_lock); + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); + dentry->d_name.len = sprintf(dentry->d_iname, "#%llu", + (unsigned long long)inode->i_ino); + spin_unlock(&dentry->d_lock); + spin_unlock(&dentry->d_parent->d_lock); + d_instantiate(dentry, inode); } -EXPORT_SYMBOL(find_inode_number); +EXPORT_SYMBOL(d_tmpfile); static __initdata unsigned long dhash_entries; static int __init set_dhash_entries(char *str) diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index c5ca6ae5a30c..63146295153b 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -21,6 +21,7 @@ #include <linux/debugfs.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/atomic.h> static ssize_t default_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos) @@ -403,6 +404,47 @@ struct dentry *debugfs_create_size_t(const char *name, umode_t mode, } EXPORT_SYMBOL_GPL(debugfs_create_size_t); +static int debugfs_atomic_t_set(void *data, u64 val) +{ + atomic_set((atomic_t *)data, val); + return 0; +} +static int debugfs_atomic_t_get(void *data, u64 *val) +{ + *val = atomic_read((atomic_t *)data); + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get, + debugfs_atomic_t_set, "%lld\n"); +DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t_ro, debugfs_atomic_t_get, NULL, "%lld\n"); +DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t_wo, NULL, debugfs_atomic_t_set, "%lld\n"); + +/** + * debugfs_create_atomic_t - create a debugfs file that is used to read and + * write an atomic_t value + * @name: a pointer to a string containing the name of the file to create. + * @mode: the permission that the file should have + * @parent: a pointer to the parent dentry for this file. This should be a + * directory dentry if set. If this parameter is %NULL, then the + * file will be created in the root of the debugfs filesystem. + * @value: a pointer to the variable that the file should read to and write + * from. 
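Usage of the helper documented above follows the other debugfs scalar creators; a hypothetical module-side sketch:

	static atomic_t my_counter = ATOMIC_INIT(0);

	static int __init my_module_init(void)
	{
		struct dentry *dir = debugfs_create_dir("mydrv", NULL);

		/* 0644 has both read and write bits, so the full fops is used;
		 * 0444 or 0200 would select the _ro/_wo variants automatically */
		debugfs_create_atomic_t("counter", 0644, dir, &my_counter);
		return 0;
	}
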
+ */ +struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode, + struct dentry *parent, atomic_t *value) +{ + /* if there are no write bits set, make read only */ + if (!(mode & S_IWUGO)) + return debugfs_create_file(name, mode, parent, value, + &fops_atomic_t_ro); + /* if there are no read bits set, make write only */ + if (!(mode & S_IRUGO)) + return debugfs_create_file(name, mode, parent, value, + &fops_atomic_t_wo); + + return debugfs_create_file(name, mode, parent, value, &fops_atomic_t); +} +EXPORT_SYMBOL_GPL(debugfs_create_atomic_t); static ssize_t read_file_bool(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -431,6 +473,7 @@ static ssize_t write_file_bool(struct file *file, const char __user *user_buf, if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; + buf[buf_size] = '\0'; if (strtobool(buf, &bv) == 0) *val = bv; diff --git a/fs/dlm/config.c b/fs/dlm/config.c index 7d58d5b112b5..76feb4b60fa6 100644 --- a/fs/dlm/config.c +++ b/fs/dlm/config.c @@ -138,8 +138,9 @@ static ssize_t cluster_cluster_name_read(struct dlm_cluster *cl, char *buf) static ssize_t cluster_cluster_name_write(struct dlm_cluster *cl, const char *buf, size_t len) { - strncpy(dlm_config.ci_cluster_name, buf, DLM_LOCKSPACE_LEN); - strncpy(cl->cl_cluster_name, buf, DLM_LOCKSPACE_LEN); + strlcpy(dlm_config.ci_cluster_name, buf, + sizeof(dlm_config.ci_cluster_name)); + strlcpy(cl->cl_cluster_name, buf, sizeof(cl->cl_cluster_name)); return len; } diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 1b1146670c4b..e223a911a834 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -2038,8 +2038,8 @@ static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb, b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; if (b == 1) { int len = receive_extralen(ms); - if (len > DLM_RESNAME_MAXLEN) - len = DLM_RESNAME_MAXLEN; + if (len > r->res_ls->ls_lvblen) + len = r->res_ls->ls_lvblen; memcpy(lkb->lkb_lvbptr, ms->m_extra, len); lkb->lkb_lvbseq = ms->m_lvbseq; } @@ -3893,8 +3893,8 @@ static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb, if (!lkb->lkb_lvbptr) return -ENOMEM; len = receive_extralen(ms); - if (len > DLM_RESNAME_MAXLEN) - len = DLM_RESNAME_MAXLEN; + if (len > ls->ls_lvblen) + len = ls->ls_lvblen; memcpy(lkb->lkb_lvbptr, ms->m_extra, len); } return 0; diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index 3ca79d3253b9..88556dc0458e 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -883,17 +883,24 @@ int dlm_release_lockspace(void *lockspace, int force) void dlm_stop_lockspaces(void) { struct dlm_ls *ls; + int count; restart: + count = 0; spin_lock(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { - if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) + if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) { + count++; continue; + } spin_unlock(&lslist_lock); log_error(ls, "no userland control daemon, stopping lockspace"); dlm_ls_stop(ls); goto restart; } spin_unlock(&lslist_lock); + + if (count) + log_print("dlm user daemon left %d lockspaces", count); } diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index d0ccd2fd79eb..d90909ec6aa6 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -52,7 +52,6 @@ #include <linux/mutex.h> #include <linux/sctp.h> #include <linux/slab.h> -#include <linux/sctp.h> #include <net/sctp/sctp.h> #include <net/ipv6.h> @@ -126,6 +125,7 @@ struct connection { struct connection *othercon; struct work_struct rwork; /* Receive workqueue */ struct work_struct swork; /* Send workqueue */ + bool try_new_addr; }; 
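The dlm/config.c change earlier in this range is the classic strncpy() pitfall: strncpy() does not NUL-terminate when the source fills the buffer, while strlcpy() always terminates and truncates — and the fix also sizes the copy against the destination (sizeof(...)) rather than a protocol constant. A self-contained illustration (strlcpy is not in every libc, so the usual implementation is inlined for the demo):

	#include <stdio.h>
	#include <string.h>

	/* The conventional strlcpy: always NUL-terminates, returns strlen(src). */
	static size_t my_strlcpy(char *dst, const char *src, size_t size)
	{
		size_t len = strlen(src);

		if (size) {
			size_t n = len >= size ? size - 1 : len;
			memcpy(dst, src, n);
			dst[n] = '\0';
		}
		return len;
	}

	int main(void)
	{
		char a[8], b[8];

		strncpy(a, "clustername", sizeof(a));	/* fills all 8 bytes, no NUL */
		my_strlcpy(b, "clustername", sizeof(b));/* "cluster" + NUL */
		printf("strlcpy: \"%s\"\n", b);
		/* printing 'a' with %s here would read past the array */
		return 0;
	}
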
#define sock2con(x) ((struct connection *)(x)->sk_user_data) @@ -144,6 +144,7 @@ struct dlm_node_addr { struct list_head list; int nodeid; int addr_count; + int curr_addr_index; struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT]; }; @@ -310,7 +311,7 @@ static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y) } static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out, - struct sockaddr *sa_out) + struct sockaddr *sa_out, bool try_new_addr) { struct sockaddr_storage sas; struct dlm_node_addr *na; @@ -320,8 +321,16 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out, spin_lock(&dlm_node_addrs_spin); na = find_node_addr(nodeid); - if (na && na->addr_count) - memcpy(&sas, na->addr[0], sizeof(struct sockaddr_storage)); + if (na && na->addr_count) { + if (try_new_addr) { + na->curr_addr_index++; + if (na->curr_addr_index == na->addr_count) + na->curr_addr_index = 0; + } + + memcpy(&sas, na->addr[na->curr_addr_index ], + sizeof(struct sockaddr_storage)); + } spin_unlock(&dlm_node_addrs_spin); if (!na) @@ -353,19 +362,22 @@ static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid) { struct dlm_node_addr *na; int rv = -EEXIST; + int addr_i; spin_lock(&dlm_node_addrs_spin); list_for_each_entry(na, &dlm_node_addrs, list) { if (!na->addr_count) continue; - if (!addr_compare(na->addr[0], addr)) - continue; - - *nodeid = na->nodeid; - rv = 0; - break; + for (addr_i = 0; addr_i < na->addr_count; addr_i++) { + if (addr_compare(na->addr[addr_i], addr)) { + *nodeid = na->nodeid; + rv = 0; + goto unlock; + } + } } +unlock: spin_unlock(&dlm_node_addrs_spin); return rv; } @@ -561,8 +573,23 @@ static void sctp_send_shutdown(sctp_assoc_t associd) static void sctp_init_failed_foreach(struct connection *con) { + + /* + * Don't try to recover base con and handle race where the + * other node's assoc init creates a assoc and we get that + * notification, then we get a notification that our attempt + * failed due. This happens when we are still trying the primary + * address, but the other node has already tried secondary addrs + * and found one that worked. + */ + if (!con->nodeid || con->sctp_assoc) + return; + + log_print("Retrying SCTP association init for node %d\n", con->nodeid); + + con->try_new_addr = true; con->sctp_assoc = 0; - if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) { + if (test_and_clear_bit(CF_INIT_PENDING, &con->flags)) { if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) queue_work(send_workqueue, &con->swork); } @@ -579,15 +606,56 @@ static void sctp_init_failed(void) mutex_unlock(&connections_lock); } +static void retry_failed_sctp_send(struct connection *recv_con, + struct sctp_send_failed *sn_send_failed, + char *buf) +{ + int len = sn_send_failed->ssf_length - sizeof(struct sctp_send_failed); + struct dlm_mhandle *mh; + struct connection *con; + char *retry_buf; + int nodeid = sn_send_failed->ssf_info.sinfo_ppid; + + log_print("Retry sending %d bytes to node id %d", len, nodeid); + + con = nodeid2con(nodeid, 0); + if (!con) { + log_print("Could not look up con for nodeid %d\n", + nodeid); + return; + } + + mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &retry_buf); + if (!mh) { + log_print("Could not allocate buf for retry."); + return; + } + memcpy(retry_buf, buf + sizeof(struct sctp_send_failed), len); + dlm_lowcomms_commit_buffer(mh); + + /* + * If we got a assoc changed event before the send failed event then + * we only need to retry the send. 
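The try_new_addr/curr_addr_index plumbing above is plain round-robin failover over a node's configured addresses: a retry advances the index and wraps back to the primary. The selection step in isolation (illustrative types and names):

	#include <sys/socket.h>

	struct node_addrs {
		int count;				/* configured addresses */
		int curr;				/* index currently in use */
		struct sockaddr_storage *addr[8];	/* DLM allows several per node */
	};

	static struct sockaddr_storage *pick_addr(struct node_addrs *na,
						  int try_new_addr)
	{
		if (try_new_addr && ++na->curr >= na->count)
			na->curr = 0;		/* wrap back to the primary */
		return na->addr[na->curr];
	}
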
+ */ + if (con->sctp_assoc) { + if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) + queue_work(send_workqueue, &con->swork); + } else + sctp_init_failed_foreach(con); +} + /* Something happened to an association */ static void process_sctp_notification(struct connection *con, struct msghdr *msg, char *buf) { union sctp_notification *sn = (union sctp_notification *)buf; - if (sn->sn_header.sn_type == SCTP_ASSOC_CHANGE) { + switch (sn->sn_header.sn_type) { + case SCTP_SEND_FAILED: + retry_failed_sctp_send(con, &sn->sn_send_failed, buf); + break; + case SCTP_ASSOC_CHANGE: switch (sn->sn_assoc_change.sac_state) { - case SCTP_COMM_UP: case SCTP_RESTART: { @@ -662,9 +730,11 @@ static void process_sctp_notification(struct connection *con, log_print("connecting to %d sctp association %d", nodeid, (int)sn->sn_assoc_change.sac_assoc_id); + new_con->sctp_assoc = sn->sn_assoc_change.sac_assoc_id; + new_con->try_new_addr = false; /* Send any pending writes */ clear_bit(CF_CONNECT_PENDING, &new_con->flags); - clear_bit(CF_INIT_PENDING, &con->flags); + clear_bit(CF_INIT_PENDING, &new_con->flags); if (!test_and_set_bit(CF_WRITE_PENDING, &new_con->flags)) { queue_work(send_workqueue, &new_con->swork); } @@ -683,14 +753,10 @@ static void process_sctp_notification(struct connection *con, } break; - /* We don't know which INIT failed, so clear the PENDING flags - * on them all. if assoc_id is zero then it will then try - * again */ - case SCTP_CANT_STR_ASSOC: { + /* Will retry init when we get the send failed notification */ log_print("Can't start SCTP association - retrying"); - sctp_init_failed(); } break; @@ -699,6 +765,8 @@ static void process_sctp_notification(struct connection *con, (int)sn->sn_assoc_change.sac_assoc_id, sn->sn_assoc_change.sac_state); } + default: + ; /* fall through */ } } @@ -958,6 +1026,24 @@ static void free_entry(struct writequeue_entry *e) kfree(e); } +/* + * writequeue_entry_complete - try to delete and free write queue entry + * @e: write queue entry to try to delete + * @completed: bytes completed + * + * writequeue_lock must be held. + */ +static void writequeue_entry_complete(struct writequeue_entry *e, int completed) +{ + e->offset += completed; + e->len -= completed; + + if (e->len == 0 && e->users == 0) { + list_del(&e->list); + free_entry(e); + } +} + /* Initiate an SCTP association. 
This is a special case of send_to_sock() in that we don't yet have a peeled-off socket for this association, so we use the listening socket @@ -977,15 +1063,14 @@ static void sctp_init_assoc(struct connection *con) int addrlen; struct kvec iov[1]; + mutex_lock(&con->sock_mutex); if (test_and_set_bit(CF_INIT_PENDING, &con->flags)) - return; - - if (con->retries++ > MAX_CONNECT_RETRIES) - return; + goto unlock; - if (nodeid_to_addr(con->nodeid, NULL, (struct sockaddr *)&rem_addr)) { + if (nodeid_to_addr(con->nodeid, NULL, (struct sockaddr *)&rem_addr, + con->try_new_addr)) { log_print("no address for nodeid %d", con->nodeid); - return; + goto unlock; } base_con = nodeid2con(0, 0); BUG_ON(base_con == NULL); @@ -1003,17 +1088,25 @@ static void sctp_init_assoc(struct connection *con) if (list_empty(&con->writequeue)) { spin_unlock(&con->writequeue_lock); log_print("writequeue empty for nodeid %d", con->nodeid); - return; + goto unlock; } e = list_first_entry(&con->writequeue, struct writequeue_entry, list); len = e->len; offset = e->offset; - spin_unlock(&con->writequeue_lock); /* Send the first block off the write queue */ iov[0].iov_base = page_address(e->page)+offset; iov[0].iov_len = len; + spin_unlock(&con->writequeue_lock); + + if (rem_addr.ss_family == AF_INET) { + struct sockaddr_in *sin = (struct sockaddr_in *)&rem_addr; + log_print("Trying to connect to %pI4", &sin->sin_addr.s_addr); + } else { + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&rem_addr; + log_print("Trying to connect to %pI6", &sin6->sin6_addr); + } cmsg = CMSG_FIRSTHDR(&outmessage); cmsg->cmsg_level = IPPROTO_SCTP; @@ -1021,8 +1114,9 @@ static void sctp_init_assoc(struct connection *con) cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); sinfo = CMSG_DATA(cmsg); memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo)); - sinfo->sinfo_ppid = cpu_to_le32(dlm_our_nodeid()); + sinfo->sinfo_ppid = cpu_to_le32(con->nodeid); outmessage.msg_controllen = cmsg->cmsg_len; + sinfo->sinfo_flags |= SCTP_ADDR_OVER; ret = kernel_sendmsg(base_con->sock, &outmessage, iov, 1, len); if (ret < 0) { @@ -1035,15 +1129,12 @@ static void sctp_init_assoc(struct connection *con) } else { spin_lock(&con->writequeue_lock); - e->offset += ret; - e->len -= ret; - - if (e->len == 0 && e->users == 0) { - list_del(&e->list); - free_entry(e); - } + writequeue_entry_complete(e, ret); spin_unlock(&con->writequeue_lock); } + +unlock: + mutex_unlock(&con->sock_mutex); } /* Connect a new socket to its peer */ @@ -1075,7 +1166,7 @@ static void tcp_connect_to_sock(struct connection *con) goto out_err; memset(&saddr, 0, sizeof(saddr)); - result = nodeid_to_addr(con->nodeid, &saddr, NULL); + result = nodeid_to_addr(con->nodeid, &saddr, NULL, false); if (result < 0) { log_print("no address for nodeid %d", con->nodeid); goto out_err; @@ -1254,6 +1345,7 @@ static int sctp_listen_for_all(void) int result = -EINVAL, num = 1, i, addr_len; struct connection *con = nodeid2con(0, GFP_NOFS); int bufsize = NEEDED_RMEM; + int one = 1; if (!con) return -ENOMEM; @@ -1288,6 +1380,11 @@ static int sctp_listen_for_all(void) goto create_delsock; } + result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one, + sizeof(one)); + if (result < 0) + log_print("Could not set SCTP NODELAY error %d\n", result); + /* Init con struct */ sock->sk->sk_user_data = con; con->sock = sock; @@ -1493,13 +1590,7 @@ static void send_to_sock(struct connection *con) } spin_lock(&con->writequeue_lock); - e->offset += ret; - e->len -= ret; - - if (e->len == 0 && e->users == 0) { - 
list_del(&e->list); - free_entry(e); - } + writequeue_entry_complete(e, ret); } spin_unlock(&con->writequeue_lock); out: diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index f71ec125290d..cfa109a4d5a2 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -2243,12 +2243,11 @@ out: */ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name, size_t *plaintext_name_size, - struct dentry *ecryptfs_dir_dentry, + struct super_block *sb, const char *name, size_t name_size) { struct ecryptfs_mount_crypt_stat *mount_crypt_stat = - &ecryptfs_superblock_to_private( - ecryptfs_dir_dentry->d_sb)->mount_crypt_stat; + &ecryptfs_superblock_to_private(sb)->mount_crypt_stat; char *decoded_name; size_t decoded_name_size; size_t packet_size; diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index f622a733f7ad..df19d34a033b 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -575,7 +575,7 @@ int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry, struct inode *ecryptfs_inode); int ecryptfs_decode_and_decrypt_filename(char **decrypted_name, size_t *decrypted_name_size, - struct dentry *ecryptfs_dentry, + struct super_block *sb, const char *name, size_t name_size); int ecryptfs_fill_zeros(struct file *file, loff_t new_length); int ecryptfs_encrypt_and_encode_filename( diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index a7abbea2c096..24f1105fda3a 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -68,9 +68,9 @@ static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb, } struct ecryptfs_getdents_callback { - void *dirent; - struct dentry *dentry; - filldir_t filldir; + struct dir_context ctx; + struct dir_context *caller; + struct super_block *sb; int filldir_called; int entries_written; }; @@ -88,7 +88,7 @@ ecryptfs_filldir(void *dirent, const char *lower_name, int lower_namelen, buf->filldir_called++; rc = ecryptfs_decode_and_decrypt_filename(&name, &name_size, - buf->dentry, lower_name, + buf->sb, lower_name, lower_namelen); if (rc) { printk(KERN_ERR "%s: Error attempting to decode and decrypt " @@ -96,9 +96,10 @@ ecryptfs_filldir(void *dirent, const char *lower_name, int lower_namelen, rc); goto out; } - rc = buf->filldir(buf->dirent, name, name_size, offset, ino, d_type); + buf->caller->pos = buf->ctx.pos; + rc = !dir_emit(buf->caller, name, name_size, ino, d_type); kfree(name); - if (rc >= 0) + if (!rc) buf->entries_written++; out: return rc; @@ -107,27 +108,22 @@ out: /** * ecryptfs_readdir * @file: The eCryptfs directory file - * @dirent: Directory entry handle - * @filldir: The filldir callback function + * @ctx: The actor to feed the entries to */ -static int ecryptfs_readdir(struct file *file, void *dirent, filldir_t filldir) +static int ecryptfs_readdir(struct file *file, struct dir_context *ctx) { int rc; struct file *lower_file; - struct inode *inode; - struct ecryptfs_getdents_callback buf; - + struct inode *inode = file_inode(file); + struct ecryptfs_getdents_callback buf = { + .ctx.actor = ecryptfs_filldir, + .caller = ctx, + .sb = inode->i_sb, + }; lower_file = ecryptfs_file_to_lower(file); - lower_file->f_pos = file->f_pos; - inode = file_inode(file); - memset(&buf, 0, sizeof(buf)); - buf.dirent = dirent; - buf.dentry = file->f_path.dentry; - buf.filldir = filldir; - buf.filldir_called = 0; - buf.entries_written = 0; - rc = vfs_readdir(lower_file, ecryptfs_filldir, (void *)&buf); - file->f_pos = lower_file->f_pos; + lower_file->f_pos = ctx->pos; + rc = iterate_dir(lower_file, &buf.ctx); + 
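The ecryptfs conversion above is the stacking-filesystem variant of the new interface: the wrapper hands the lower directory its own dir_context, and that actor re-emits each name (after decryption) into the caller's context, mirroring the position between the two. The shape, stripped of the crypto (shim_* and translate_name are hypothetical):

	struct shim_callback {
		struct dir_context ctx;		/* actor driven by the lower fs */
		struct dir_context *caller;	/* context the entries are re-emitted into */
	};

	static int shim_actor(void *__buf, const char *name, int namelen,
			      loff_t offset, u64 ino, unsigned int d_type)
	{
		struct shim_callback *buf = __buf;
		char out[NAME_MAX + 1];
		int outlen = translate_name(out, name, namelen);

		buf->caller->pos = buf->ctx.pos;	/* keep positions in step */
		/* dir_emit() returns false when the user buffer is full;
		 * returning non-zero stops the lower iteration too */
		return !dir_emit(buf->caller, out, outlen, ino, d_type);
	}
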
ctx->pos = buf.ctx.pos; if (rc < 0) goto out; if (buf.filldir_called && !buf.entries_written) @@ -344,7 +340,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) #endif const struct file_operations ecryptfs_dir_fops = { - .readdir = ecryptfs_readdir, + .iterate = ecryptfs_readdir, .read = generic_read_dir, .unlocked_ioctl = ecryptfs_unlocked_ioctl, #ifdef CONFIG_COMPAT @@ -365,7 +361,7 @@ const struct file_operations ecryptfs_main_fops = { .aio_read = ecryptfs_read_update_atime, .write = do_sync_write, .aio_write = generic_file_aio_write, - .readdir = ecryptfs_readdir, + .iterate = ecryptfs_readdir, .unlocked_ioctl = ecryptfs_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ecryptfs_compat_ioctl, diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 5eab400e2590..a2f2bb2c256d 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -679,7 +679,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf, set_fs(old_fs); if (rc < 0) goto out; - rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry, + rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry->d_sb, lower_buf, rc); out: kfree(lower_buf); diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index 141aee31884f..a8766b880c07 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -45,8 +45,8 @@ static struct super_block *efivarfs_sb; * So we need to perform a case-sensitive match on part 1 and a * case-insensitive match on part 2. */ -static int efivarfs_d_compare(const struct dentry *parent, const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +static int efivarfs_d_compare(const struct dentry *parent, + const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { @@ -63,8 +63,7 @@ static int efivarfs_d_compare(const struct dentry *parent, const struct inode *p return strncasecmp(name->name + guid, str + guid, EFI_VARIABLE_GUID_LEN); } -static int efivarfs_d_hash(const struct dentry *dentry, - const struct inode *inode, struct qstr *qstr) +static int efivarfs_d_hash(const struct dentry *dentry, struct qstr *qstr) { unsigned long hash = init_name_hash(); const unsigned char *s = qstr->name; @@ -108,7 +107,7 @@ static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name) q.name = name; q.len = strlen(name); - err = efivarfs_d_hash(NULL, NULL, &q); + err = efivarfs_d_hash(NULL, &q); if (err) return ERR_PTR(err); diff --git a/fs/efs/dir.c b/fs/efs/dir.c index 055a9e9ca747..b72307ccdf7a 100644 --- a/fs/efs/dir.c +++ b/fs/efs/dir.c @@ -7,40 +7,38 @@ #include <linux/buffer_head.h> #include "efs.h" -static int efs_readdir(struct file *, void *, filldir_t); +static int efs_readdir(struct file *, struct dir_context *); const struct file_operations efs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = efs_readdir, + .iterate = efs_readdir, }; const struct inode_operations efs_dir_inode_operations = { .lookup = efs_lookup, }; -static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct inode *inode = file_inode(filp); - struct buffer_head *bh; - - struct efs_dir *dirblock; - struct efs_dentry *dirslot; - efs_ino_t inodenum; +static int efs_readdir(struct file *file, struct dir_context *ctx) +{ + struct inode *inode = file_inode(file); efs_block_t block; - int slot, namelen; - char *nameptr; + int slot; if (inode->i_size & (EFS_DIRBSIZE-1)) printk(KERN_WARNING "EFS: WARNING: readdir(): directory size not a multiple of 
EFS_DIRBSIZE\n"); /* work out where this entry can be found */ - block = filp->f_pos >> EFS_DIRBSIZE_BITS; + block = ctx->pos >> EFS_DIRBSIZE_BITS; /* each block contains at most 256 slots */ - slot = filp->f_pos & 0xff; + slot = ctx->pos & 0xff; /* look at all blocks */ while (block < inode->i_blocks) { + struct efs_dir *dirblock; + struct buffer_head *bh; + /* read the dir block */ bh = sb_bread(inode->i_sb, efs_bmap(inode, block)); @@ -57,11 +55,14 @@ static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) { break; } - while (slot < dirblock->slots) { - if (dirblock->space[slot] == 0) { - slot++; + for (; slot < dirblock->slots; slot++) { + struct efs_dentry *dirslot; + efs_ino_t inodenum; + const char *nameptr; + int namelen; + + if (dirblock->space[slot] == 0) continue; - } dirslot = (struct efs_dentry *) (((char *) bh->b_data) + EFS_SLOTAT(dirblock, slot)); @@ -72,39 +73,29 @@ static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) { #ifdef DEBUG printk(KERN_DEBUG "EFS: readdir(): block %d slot %d/%d: inode %u, name \"%s\", namelen %u\n", block, slot, dirblock->slots-1, inodenum, nameptr, namelen); #endif - if (namelen > 0) { - /* found the next entry */ - filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot; - - /* copy filename and data in dirslot */ - filldir(dirent, nameptr, namelen, filp->f_pos, inodenum, DT_UNKNOWN); - - /* sanity check */ - if (nameptr - (char *) dirblock + namelen > EFS_DIRBSIZE) { - printk(KERN_WARNING "EFS: directory entry %d exceeds directory block\n", slot); - slot++; - continue; - } - - /* store position of next slot */ - if (++slot == dirblock->slots) { - slot = 0; - block++; - } + if (!namelen) + continue; + /* found the next entry */ + ctx->pos = (block << EFS_DIRBSIZE_BITS) | slot; + + /* sanity check */ + if (nameptr - (char *) dirblock + namelen > EFS_DIRBSIZE) { + printk(KERN_WARNING "EFS: directory entry %d exceeds directory block\n", slot); + continue; + } + + /* copy filename and data in dirslot */ + if (!dir_emit(ctx, nameptr, namelen, inodenum, DT_UNKNOWN)) { brelse(bh); - filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot; - goto out; + return 0; } - slot++; } brelse(bh); slot = 0; block++; } - - filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot; -out: + ctx->pos = (block << EFS_DIRBSIZE_BITS) | slot; return 0; } diff --git a/fs/eventpoll.c b/fs/eventpoll.c index deecc7294a67..9ad17b15b454 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -34,6 +34,7 @@ #include <linux/mutex.h> #include <linux/anon_inodes.h> #include <linux/device.h> +#include <linux/freezer.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/mman.h> @@ -1602,7 +1603,8 @@ fetch_events: } spin_unlock_irqrestore(&ep->lock, flags); - if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) + if (!freezable_schedule_hrtimeout_range(to, slack, + HRTIMER_MODE_ABS)) timed_out = 1; spin_lock_irqsave(&ep->lock, flags); @@ -1975,8 +1977,8 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, return -EINVAL; if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask))) return -EFAULT; - sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP)); - sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); + sigsaved = current->blocked; + set_current_blocked(&ksigmask); } error = sys_epoll_wait(epfd, events, maxevents, timeout); @@ -1993,7 +1995,7 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, sizeof(sigsaved)); set_restore_sigmask(); } else - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + 
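For reference, the contract these epoll hunks implement: epoll_pwait(2) installs the caller-supplied signal mask only for the duration of the wait, atomically with going to sleep. Switching to set_current_blocked() also makes the explicit SIGKILL/SIGSTOP filtering unnecessary, since blocking those signals is ineffective anyway. Typical userspace use (runnable sketch):

	#include <signal.h>
	#include <stdio.h>
	#include <sys/epoll.h>
	#include <unistd.h>

	int main(void)
	{
		int epfd = epoll_create1(0);
		struct epoll_event ev = { .events = EPOLLIN };
		sigset_t block_all;
		int n;

		epoll_ctl(epfd, EPOLL_CTL_ADD, 0, &ev);	/* watch stdin */
		sigfillset(&block_all);
		sigdelset(&block_all, SIGINT);		/* let ^C interrupt the wait */

		/* block_all is in force only while we sleep in the kernel */
		n = epoll_pwait(epfd, &ev, 1, 5000, &block_all);
		printf("epoll_pwait returned %d\n", n);
		close(epfd);
		return 0;
	}
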
set_current_blocked(&sigsaved); } return error; @@ -2020,8 +2022,8 @@ COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd, if (copy_from_user(&csigmask, sigmask, sizeof(csigmask))) return -EFAULT; sigset_from_compat(&ksigmask, &csigmask); - sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP)); - sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); + sigsaved = current->blocked; + set_current_blocked(&ksigmask); } err = sys_epoll_wait(epfd, events, maxevents, timeout); @@ -2038,7 +2040,7 @@ COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd, sizeof(sigsaved)); set_restore_sigmask(); } else - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + set_current_blocked(&sigsaved); } return err; diff --git a/fs/exec.c b/fs/exec.c index 643019585574..9c73def87642 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -110,13 +110,14 @@ SYSCALL_DEFINE1(uselib, const char __user *, library) static const struct open_flags uselib_flags = { .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC, .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN, - .intent = LOOKUP_OPEN + .intent = LOOKUP_OPEN, + .lookup_flags = LOOKUP_FOLLOW, }; if (IS_ERR(tmp)) goto out; - file = do_filp_open(AT_FDCWD, tmp, &uselib_flags, LOOKUP_FOLLOW); + file = do_filp_open(AT_FDCWD, tmp, &uselib_flags); putname(tmp); error = PTR_ERR(file); if (IS_ERR(file)) @@ -756,10 +757,11 @@ struct file *open_exec(const char *name) static const struct open_flags open_exec_flags = { .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC, .acc_mode = MAY_EXEC | MAY_OPEN, - .intent = LOOKUP_OPEN + .intent = LOOKUP_OPEN, + .lookup_flags = LOOKUP_FOLLOW, }; - file = do_filp_open(AT_FDCWD, &tmp, &open_exec_flags, LOOKUP_FOLLOW); + file = do_filp_open(AT_FDCWD, &tmp, &open_exec_flags); if (IS_ERR(file)) goto out; @@ -930,6 +932,7 @@ static int de_thread(struct task_struct *tsk) * also take its birthdate (always earlier than our own). */ tsk->start_time = leader->start_time; + tsk->real_start_time = leader->real_start_time; BUG_ON(!same_thread_group(leader, tsk)); BUG_ON(has_group_leader_pid(tsk)); @@ -945,9 +948,8 @@ static int de_thread(struct task_struct *tsk) * Note: The old leader also uses this pid until release_task * is called. Odd but simple and correct. */ - detach_pid(tsk, PIDTYPE_PID); tsk->pid = leader->pid; - attach_pid(tsk, PIDTYPE_PID, task_pid(leader)); + change_pid(tsk, PIDTYPE_PID, task_pid(leader)); transfer_pid(leader, tsk, PIDTYPE_PGID); transfer_pid(leader, tsk, PIDTYPE_SID); @@ -1135,13 +1137,6 @@ void setup_new_exec(struct linux_binprm * bprm) set_dumpable(current->mm, suid_dumpable); } - /* - * Flush performance counters when crossing a - * security domain: - */ - if (!get_dumpable(current->mm)) - perf_event_exit_task(current); - /* An exec changes our domain. We are no longer part of the thread group */ @@ -1205,6 +1200,15 @@ void install_exec_creds(struct linux_binprm *bprm) commit_creds(bprm->cred); bprm->cred = NULL; + + /* + * Disable monitoring for regular users + * when executing setuid binaries. 
Must + * wait until new credentials are committed + * by commit_creds() above + */ + if (get_dumpable(current->mm) != SUID_DUMP_USER) + perf_event_exit_task(current); /* * cred_guard_mutex must be held at least to this point to prevent * ptrace_attach() from altering our determination of the task's @@ -1461,7 +1465,6 @@ static int do_execve_common(const char *filename, struct files_struct *displaced; bool clear_in_exec; int retval; - const struct cred *cred = current_cred(); /* * We move the actual failure in case of RLIMIT_NPROC excess from @@ -1470,7 +1473,7 @@ static int do_execve_common(const char *filename, * whether NPROC limit is still exceeded. */ if ((current->flags & PF_NPROC_EXCEEDED) && - atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) { + atomic_read(¤t_user()->processes) > rlimit(RLIMIT_NPROC)) { retval = -EAGAIN; goto out_ret; } diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c index 46375896cfc0..49f51ab4caac 100644 --- a/fs/exofs/dir.c +++ b/fs/exofs/dir.c @@ -239,22 +239,19 @@ void exofs_set_de_type(struct exofs_dir_entry *de, struct inode *inode) } static int -exofs_readdir(struct file *filp, void *dirent, filldir_t filldir) +exofs_readdir(struct file *file, struct dir_context *ctx) { - loff_t pos = filp->f_pos; - struct inode *inode = file_inode(filp); + loff_t pos = ctx->pos; + struct inode *inode = file_inode(file); unsigned int offset = pos & ~PAGE_CACHE_MASK; unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned long npages = dir_pages(inode); unsigned chunk_mask = ~(exofs_chunk_size(inode)-1); - unsigned char *types = NULL; - int need_revalidate = (filp->f_version != inode->i_version); + int need_revalidate = (file->f_version != inode->i_version); if (pos > inode->i_size - EXOFS_DIR_REC_LEN(1)) return 0; - types = exofs_filetype_table; - for ( ; n < npages; n++, offset = 0) { char *kaddr, *limit; struct exofs_dir_entry *de; @@ -263,7 +260,7 @@ exofs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (IS_ERR(page)) { EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n", inode->i_ino); - filp->f_pos += PAGE_CACHE_SIZE - offset; + ctx->pos += PAGE_CACHE_SIZE - offset; return PTR_ERR(page); } kaddr = page_address(page); @@ -271,9 +268,9 @@ exofs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (offset) { offset = exofs_validate_entry(kaddr, offset, chunk_mask); - filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset; + ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset; } - filp->f_version = inode->i_version; + file->f_version = inode->i_version; need_revalidate = 0; } de = (struct exofs_dir_entry *)(kaddr + offset); @@ -288,27 +285,24 @@ exofs_readdir(struct file *filp, void *dirent, filldir_t filldir) return -EIO; } if (de->inode_no) { - int over; - unsigned char d_type = DT_UNKNOWN; + unsigned char t; - if (types && de->file_type < EXOFS_FT_MAX) - d_type = types[de->file_type]; + if (de->file_type < EXOFS_FT_MAX) + t = exofs_filetype_table[de->file_type]; + else + t = DT_UNKNOWN; - offset = (char *)de - kaddr; - over = filldir(dirent, de->name, de->name_len, - (n<<PAGE_CACHE_SHIFT) | offset, + if (!dir_emit(ctx, de->name, de->name_len, le64_to_cpu(de->inode_no), - d_type); - if (over) { + t)) { exofs_put_page(page); return 0; } } - filp->f_pos += le16_to_cpu(de->rec_len); + ctx->pos += le16_to_cpu(de->rec_len); } exofs_put_page(page); } - return 0; } @@ -669,5 +663,5 @@ not_empty: const struct file_operations exofs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = exofs_readdir, + .iterate = exofs_readdir, }; diff 
--git a/fs/exofs/inode.c b/fs/exofs/inode.c index d1f80abd8828..2ec8eb1ab269 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c @@ -953,9 +953,11 @@ static int exofs_releasepage(struct page *page, gfp_t gfp) return 0; } -static void exofs_invalidatepage(struct page *page, unsigned long offset) +static void exofs_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { - EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset); + EXOFS_DBGMSG("page 0x%lx offset 0x%x length 0x%x\n", + page->index, offset, length); WARN_ON(1); } diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index 262fc9940982..293bc2e47a73 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -212,6 +212,7 @@ reconnect_path(struct vfsmount *mnt, struct dentry *target_dir, char *nbuf) } struct getdents_callback { + struct dir_context ctx; char *name; /* name that was found. It already points to a buffer NAME_MAX+1 is size */ unsigned long ino; /* the inum we are looking for */ @@ -254,7 +255,11 @@ static int get_name(const struct path *path, char *name, struct dentry *child) struct inode *dir = path->dentry->d_inode; int error; struct file *file; - struct getdents_callback buffer; + struct getdents_callback buffer = { + .ctx.actor = filldir_one, + .name = name, + .ino = child->d_inode->i_ino + }; error = -ENOTDIR; if (!dir || !S_ISDIR(dir->i_mode)) @@ -271,17 +276,14 @@ static int get_name(const struct path *path, char *name, struct dentry *child) goto out; error = -EINVAL; - if (!file->f_op->readdir) + if (!file->f_op->iterate) goto out_close; - buffer.name = name; - buffer.ino = child->d_inode->i_ino; - buffer.found = 0; buffer.sequence = 0; while (1) { int old_seq = buffer.sequence; - error = vfs_readdir(file, filldir_one, &buffer); + error = iterate_dir(file, &buffer.ctx); if (buffer.found) { error = 0; break; diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index 4237722bfd27..6e1d4ab09d72 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c @@ -287,17 +287,17 @@ static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode) } static int -ext2_readdir (struct file * filp, void * dirent, filldir_t filldir) +ext2_readdir(struct file *file, struct dir_context *ctx) { - loff_t pos = filp->f_pos; - struct inode *inode = file_inode(filp); + loff_t pos = ctx->pos; + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; unsigned int offset = pos & ~PAGE_CACHE_MASK; unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned long npages = dir_pages(inode); unsigned chunk_mask = ~(ext2_chunk_size(inode)-1); unsigned char *types = NULL; - int need_revalidate = filp->f_version != inode->i_version; + int need_revalidate = file->f_version != inode->i_version; if (pos > inode->i_size - EXT2_DIR_REC_LEN(1)) return 0; @@ -314,16 +314,16 @@ ext2_readdir (struct file * filp, void * dirent, filldir_t filldir) ext2_error(sb, __func__, "bad page in #%lu", inode->i_ino); - filp->f_pos += PAGE_CACHE_SIZE - offset; + ctx->pos += PAGE_CACHE_SIZE - offset; return PTR_ERR(page); } kaddr = page_address(page); if (unlikely(need_revalidate)) { if (offset) { offset = ext2_validate_entry(kaddr, offset, chunk_mask); - filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset; + ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset; } - filp->f_version = inode->i_version; + file->f_version = inode->i_version; need_revalidate = 0; } de = (ext2_dirent *)(kaddr+offset); @@ -336,22 +336,19 @@ ext2_readdir (struct file * filp, void * dirent, filldir_t filldir) return -EIO; } if (de->inode) { - int over; unsigned char d_type 
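The exofs_invalidatepage hunk above is one instance of an interface change running through this series: ->invalidatepage() now receives a byte range (offset, length) instead of just an offset, so an implementation can tell a sub-page punch from a whole-page invalidation. A converted handler typically takes this shape (myfs_* hypothetical):

	static void myfs_invalidatepage(struct page *page, unsigned int offset,
					unsigned int length)
	{
		/* a partial range must leave the page's private state alone
		 * for the data that survives; only a whole-page kill may
		 * release it */
		if (offset == 0 && length == PAGE_CACHE_SIZE)
			myfs_release_private(page);
	}
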
= DT_UNKNOWN; if (types && de->file_type < EXT2_FT_MAX) d_type = types[de->file_type]; - offset = (char *)de - kaddr; - over = filldir(dirent, de->name, de->name_len, - (n<<PAGE_CACHE_SHIFT) | offset, - le32_to_cpu(de->inode), d_type); - if (over) { + if (!dir_emit(ctx, de->name, de->name_len, + le32_to_cpu(de->inode), + d_type)) { ext2_put_page(page); return 0; } } - filp->f_pos += ext2_rec_len_from_disk(de->rec_len); + ctx->pos += ext2_rec_len_from_disk(de->rec_len); } ext2_put_page(page); } @@ -724,7 +721,7 @@ not_empty: const struct file_operations ext2_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = ext2_readdir, + .iterate = ext2_readdir, .unlocked_ioctl = ext2_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext2_compat_ioctl, diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 73b0d9519836..256dd5f4c1c4 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -119,6 +119,29 @@ static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode return ext2_add_nondir(dentry, inode); } +static int ext2_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) +{ + struct inode *inode = ext2_new_inode(dir, mode, NULL); + if (IS_ERR(inode)) + return PTR_ERR(inode); + + inode->i_op = &ext2_file_inode_operations; + if (ext2_use_xip(inode->i_sb)) { + inode->i_mapping->a_ops = &ext2_aops_xip; + inode->i_fop = &ext2_xip_file_operations; + } else if (test_opt(inode->i_sb, NOBH)) { + inode->i_mapping->a_ops = &ext2_nobh_aops; + inode->i_fop = &ext2_file_operations; + } else { + inode->i_mapping->a_ops = &ext2_aops; + inode->i_fop = &ext2_file_operations; + } + mark_inode_dirty(inode); + d_tmpfile(dentry, inode); + unlock_new_inode(inode); + return 0; +} + static int ext2_mknod (struct inode * dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct inode * inode; @@ -398,6 +421,7 @@ const struct inode_operations ext2_dir_inode_operations = { #endif .setattr = ext2_setattr, .get_acl = ext2_get_acl, + .tmpfile = ext2_tmpfile, }; const struct inode_operations ext2_special_inode_operations = { diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c index 87eccbbca255..f522425aaa24 100644 --- a/fs/ext3/dir.c +++ b/fs/ext3/dir.c @@ -28,8 +28,7 @@ static unsigned char ext3_filetype_table[] = { DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK }; -static int ext3_dx_readdir(struct file * filp, - void * dirent, filldir_t filldir); +static int ext3_dx_readdir(struct file *, struct dir_context *); static unsigned char get_dtype(struct super_block *sb, int filetype) { @@ -91,36 +90,30 @@ int ext3_check_dir_entry (const char * function, struct inode * dir, return error_msg == NULL ? 1 : 0; } -static int ext3_readdir(struct file * filp, - void * dirent, filldir_t filldir) +static int ext3_readdir(struct file *file, struct dir_context *ctx) { - int error = 0; unsigned long offset; - int i, stored; + int i; struct ext3_dir_entry_2 *de; int err; - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; - int ret = 0; int dir_has_error = 0; if (is_dx_dir(inode)) { - err = ext3_dx_readdir(filp, dirent, filldir); - if (err != ERR_BAD_DX_DIR) { - ret = err; - goto out; - } + err = ext3_dx_readdir(file, ctx); + if (err != ERR_BAD_DX_DIR) + return err; /* * We don't set the inode dirty flag since it's not * critical that it get flushed back to the disk. 
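/*
 * The fs/exportfs and fs/ext2 hunks above (and the ext3/ext4 ones that
 * follow) convert ->readdir(file, dirent, filldir) to the new
 * ->iterate(file, ctx) API: the opaque buffer and callback collapse into
 * struct dir_context, the position moves from file->f_pos to ctx->pos, and
 * dir_emit() returns false once the user buffer is full.  A minimal sketch
 * of the contract, with illustrative names that are not part of this patch:
 */
static int example_iterate(struct file *file, struct dir_context *ctx)
{
	static const char * const names[] = { "foo", "bar" };

	if (!dir_emit_dots(file, ctx))	/* "." and "..", if not emitted yet */
		return 0;
	while (ctx->pos < 2 + ARRAY_SIZE(names)) {
		const char *name = names[ctx->pos - 2];

		if (!dir_emit(ctx, name, strlen(name), 1, DT_REG))
			return 0; /* buffer full; resume at ctx->pos next call */
		ctx->pos++;
	}
	return 0;
}
/*
 * Callers that need private state embed the dir_context as the first member
 * of their own struct, as getdents_callback does above, so the actor can
 * get back to the container from the ctx pointer it is handed.
 */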
*/ - EXT3_I(file_inode(filp))->i_flags &= ~EXT3_INDEX_FL; + EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL; } - stored = 0; - offset = filp->f_pos & (sb->s_blocksize - 1); + offset = ctx->pos & (sb->s_blocksize - 1); - while (!error && !stored && filp->f_pos < inode->i_size) { - unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb); + while (ctx->pos < inode->i_size) { + unsigned long blk = ctx->pos >> EXT3_BLOCK_SIZE_BITS(sb); struct buffer_head map_bh; struct buffer_head *bh = NULL; @@ -129,12 +122,12 @@ static int ext3_readdir(struct file * filp, if (err > 0) { pgoff_t index = map_bh.b_blocknr >> (PAGE_CACHE_SHIFT - inode->i_blkbits); - if (!ra_has_index(&filp->f_ra, index)) + if (!ra_has_index(&file->f_ra, index)) page_cache_sync_readahead( sb->s_bdev->bd_inode->i_mapping, - &filp->f_ra, filp, + &file->f_ra, file, index, 1); - filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; + file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; bh = ext3_bread(NULL, inode, blk, 0, &err); } @@ -146,22 +139,21 @@ static int ext3_readdir(struct file * filp, if (!dir_has_error) { ext3_error(sb, __func__, "directory #%lu " "contains a hole at offset %lld", - inode->i_ino, filp->f_pos); + inode->i_ino, ctx->pos); dir_has_error = 1; } /* corrupt size? Maybe no more blocks to read */ - if (filp->f_pos > inode->i_blocks << 9) + if (ctx->pos > inode->i_blocks << 9) break; - filp->f_pos += sb->s_blocksize - offset; + ctx->pos += sb->s_blocksize - offset; continue; } -revalidate: /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid * dirent right now. Scan from the start of the block * to make sure. */ - if (filp->f_version != inode->i_version) { + if (offset && file->f_version != inode->i_version) { for (i = 0; i < sb->s_blocksize && i < offset; ) { de = (struct ext3_dir_entry_2 *) (bh->b_data + i); @@ -177,53 +169,40 @@ revalidate: i += ext3_rec_len_from_disk(de->rec_len); } offset = i; - filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1)) + ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1)) | offset; - filp->f_version = inode->i_version; + file->f_version = inode->i_version; } - while (!error && filp->f_pos < inode->i_size + while (ctx->pos < inode->i_size && offset < sb->s_blocksize) { de = (struct ext3_dir_entry_2 *) (bh->b_data + offset); if (!ext3_check_dir_entry ("ext3_readdir", inode, de, bh, offset)) { - /* On error, skip the f_pos to the + /* On error, skip the to the next block. */ - filp->f_pos = (filp->f_pos | + ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1; - brelse (bh); - ret = stored; - goto out; + break; } offset += ext3_rec_len_from_disk(de->rec_len); if (le32_to_cpu(de->inode)) { - /* We might block in the next section - * if the data destination is - * currently swapped out. So, use a - * version stamp to detect whether or - * not the directory has been modified - * during the copy operation. 
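/*
 * Worked example of the ctx->pos arithmetic ext3_readdir() uses above,
 * assuming a 4096-byte block size (EXT3_BLOCK_SIZE_BITS(sb) == 12):
 */
loff_t pos = 8200;
unsigned long blk = pos >> 12;			/* directory block 2 */
unsigned long offset = pos & (4096 - 1);	/* byte 8 inside that block */

/* and skipping a missing or corrupt block rounds pos up to the boundary: */
pos = (pos | (4096 - 1)) + 1;			/* 8200 -> 12288 */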
- */ - u64 version = filp->f_version; - - error = filldir(dirent, de->name, - de->name_len, - filp->f_pos, - le32_to_cpu(de->inode), - get_dtype(sb, de->file_type)); - if (error) - break; - if (version != filp->f_version) - goto revalidate; - stored ++; + if (!dir_emit(ctx, de->name, de->name_len, + le32_to_cpu(de->inode), + get_dtype(sb, de->file_type))) { + brelse(bh); + return 0; + } } - filp->f_pos += ext3_rec_len_from_disk(de->rec_len); + ctx->pos += ext3_rec_len_from_disk(de->rec_len); } offset = 0; brelse (bh); + if (ctx->pos < inode->i_size) + if (!dir_relax(inode)) + return 0; } -out: - return ret; + return 0; } static inline int is_32bit_api(void) @@ -452,62 +431,54 @@ int ext3_htree_store_dirent(struct file *dir_file, __u32 hash, * for all entres on the fname linked list. (Normally there is only * one entry on the linked list, unless there are 62 bit hash collisions.) */ -static int call_filldir(struct file * filp, void * dirent, - filldir_t filldir, struct fname *fname) +static bool call_filldir(struct file *file, struct dir_context *ctx, + struct fname *fname) { - struct dir_private_info *info = filp->private_data; - loff_t curr_pos; - struct inode *inode = file_inode(filp); - struct super_block * sb; - int error; - - sb = inode->i_sb; + struct dir_private_info *info = file->private_data; + struct inode *inode = file_inode(file); + struct super_block *sb = inode->i_sb; if (!fname) { printk("call_filldir: called with null fname?!?\n"); - return 0; + return true; } - curr_pos = hash2pos(filp, fname->hash, fname->minor_hash); + ctx->pos = hash2pos(file, fname->hash, fname->minor_hash); while (fname) { - error = filldir(dirent, fname->name, - fname->name_len, curr_pos, + if (!dir_emit(ctx, fname->name, fname->name_len, fname->inode, - get_dtype(sb, fname->file_type)); - if (error) { - filp->f_pos = curr_pos; + get_dtype(sb, fname->file_type))) { info->extra_fname = fname; - return error; + return false; } fname = fname->next; } - return 0; + return true; } -static int ext3_dx_readdir(struct file * filp, - void * dirent, filldir_t filldir) +static int ext3_dx_readdir(struct file *file, struct dir_context *ctx) { - struct dir_private_info *info = filp->private_data; - struct inode *inode = file_inode(filp); + struct dir_private_info *info = file->private_data; + struct inode *inode = file_inode(file); struct fname *fname; int ret; if (!info) { - info = ext3_htree_create_dir_info(filp, filp->f_pos); + info = ext3_htree_create_dir_info(file, ctx->pos); if (!info) return -ENOMEM; - filp->private_data = info; + file->private_data = info; } - if (filp->f_pos == ext3_get_htree_eof(filp)) + if (ctx->pos == ext3_get_htree_eof(file)) return 0; /* EOF */ /* Some one has messed with f_pos; reset the world */ - if (info->last_pos != filp->f_pos) { + if (info->last_pos != ctx->pos) { free_rb_tree_fname(&info->root); info->curr_node = NULL; info->extra_fname = NULL; - info->curr_hash = pos2maj_hash(filp, filp->f_pos); - info->curr_minor_hash = pos2min_hash(filp, filp->f_pos); + info->curr_hash = pos2maj_hash(file, ctx->pos); + info->curr_minor_hash = pos2min_hash(file, ctx->pos); } /* @@ -515,7 +486,7 @@ static int ext3_dx_readdir(struct file * filp, * chain, return them first. */ if (info->extra_fname) { - if (call_filldir(filp, dirent, filldir, info->extra_fname)) + if (!call_filldir(file, ctx, info->extra_fname)) goto finished; info->extra_fname = NULL; goto next_node; @@ -529,17 +500,17 @@ static int ext3_dx_readdir(struct file * filp, * cached entries. 
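/*
 * The dir_relax() calls added above matter because dir_emit() copies to
 * userspace while i_mutex is held: between directory blocks the lock is
 * dropped and retaken so lookups and unlinks are not starved on huge
 * directories.  Roughly the 3.11-era helper (an assumption; its body is
 * not shown in this diff):
 */
static inline bool dir_relax(struct inode *inode)
{
	mutex_unlock(&inode->i_mutex);
	mutex_lock(&inode->i_mutex);
	return !IS_DEADDIR(inode);	/* false: dir was removed, stop */
}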
*/ if ((!info->curr_node) || - (filp->f_version != inode->i_version)) { + (file->f_version != inode->i_version)) { info->curr_node = NULL; free_rb_tree_fname(&info->root); - filp->f_version = inode->i_version; - ret = ext3_htree_fill_tree(filp, info->curr_hash, + file->f_version = inode->i_version; + ret = ext3_htree_fill_tree(file, info->curr_hash, info->curr_minor_hash, &info->next_hash); if (ret < 0) return ret; if (ret == 0) { - filp->f_pos = ext3_get_htree_eof(filp); + ctx->pos = ext3_get_htree_eof(file); break; } info->curr_node = rb_first(&info->root); @@ -548,7 +519,7 @@ static int ext3_dx_readdir(struct file * filp, fname = rb_entry(info->curr_node, struct fname, rb_hash); info->curr_hash = fname->hash; info->curr_minor_hash = fname->minor_hash; - if (call_filldir(filp, dirent, filldir, fname)) + if (!call_filldir(file, ctx, fname)) break; next_node: info->curr_node = rb_next(info->curr_node); @@ -559,7 +530,7 @@ static int ext3_dx_readdir(struct file * filp, info->curr_minor_hash = fname->minor_hash; } else { if (info->next_hash == ~0) { - filp->f_pos = ext3_get_htree_eof(filp); + ctx->pos = ext3_get_htree_eof(file); break; } info->curr_hash = info->next_hash; @@ -567,7 +538,7 @@ static int ext3_dx_readdir(struct file * filp, } } finished: - info->last_pos = filp->f_pos; + info->last_pos = ctx->pos; return 0; } @@ -582,7 +553,7 @@ static int ext3_release_dir (struct inode * inode, struct file * filp) const struct file_operations ext3_dir_operations = { .llseek = ext3_dir_llseek, .read = generic_read_dir, - .readdir = ext3_readdir, + .iterate = ext3_readdir, .unlocked_ioctl = ext3_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext3_compat_ioctl, diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 23c712825640..2bd85486b879 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -1825,19 +1825,20 @@ ext3_readpages(struct file *file, struct address_space *mapping, return mpage_readpages(mapping, pages, nr_pages, ext3_get_block); } -static void ext3_invalidatepage(struct page *page, unsigned long offset) +static void ext3_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { journal_t *journal = EXT3_JOURNAL(page->mapping->host); - trace_ext3_invalidatepage(page, offset); + trace_ext3_invalidatepage(page, offset, length); /* * If it's a full truncate we just forget about the pending dirtying */ - if (offset == 0) + if (offset == 0 && length == PAGE_CACHE_SIZE) ClearPageChecked(page); - journal_invalidatepage(journal, page, offset); + journal_invalidatepage(journal, page, offset, length); } static int ext3_releasepage(struct page *page, gfp_t wait) @@ -1984,6 +1985,7 @@ static const struct address_space_operations ext3_ordered_aops = { .direct_IO = ext3_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, + .is_dirty_writeback = buffer_check_dirty_writeback, .error_remove_page = generic_error_remove_page, }; diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 692de13e3596..998ea111e537 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -576,11 +576,8 @@ static int htree_dirblock_to_tree(struct file *dir_file, if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh, (block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb)) +((char *)de - bh->b_data))) { - /* On error, skip the f_pos to the next block. 
*/ - dir_file->f_pos = (dir_file->f_pos | - (dir->i_sb->s_blocksize - 1)) + 1; - brelse (bh); - return count; + /* silently ignore the rest of the block */ + break; } ext3fs_dirhash(de->name, de->name_len, hinfo); if ((hinfo->hash < start_hash) || @@ -1762,6 +1759,45 @@ retry: return err; } +static int ext3_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) +{ + handle_t *handle; + struct inode *inode; + int err, retries = 0; + + dquot_initialize(dir); + +retry: + handle = ext3_journal_start(dir, EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) + + 4 + EXT3_XATTR_TRANS_BLOCKS); + + if (IS_ERR(handle)) + return PTR_ERR(handle); + + inode = ext3_new_inode (handle, dir, NULL, mode); + err = PTR_ERR(inode); + if (!IS_ERR(inode)) { + inode->i_op = &ext3_file_inode_operations; + inode->i_fop = &ext3_file_operations; + ext3_set_aops(inode); + err = ext3_orphan_add(handle, inode); + if (err) + goto err_drop_inode; + mark_inode_dirty(inode); + d_tmpfile(dentry, inode); + unlock_new_inode(inode); + } + ext3_journal_stop(handle); + if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries)) + goto retry; + return err; +err_drop_inode: + ext3_journal_stop(handle); + unlock_new_inode(inode); + iput(inode); + return err; +} + static int ext3_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) { handle_t *handle; @@ -2303,7 +2339,7 @@ static int ext3_link (struct dentry * old_dentry, retry: handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + - EXT3_INDEX_EXTRA_TRANS_BLOCKS); + EXT3_INDEX_EXTRA_TRANS_BLOCKS + 1); if (IS_ERR(handle)) return PTR_ERR(handle); @@ -2317,6 +2353,11 @@ retry: err = ext3_add_entry(handle, dentry, inode); if (!err) { ext3_mark_inode_dirty(handle, inode); + /* this can happen only for tmpfile being + * linked the first time + */ + if (inode->i_nlink == 1) + ext3_orphan_del(handle, inode); d_instantiate(dentry, inode); } else { drop_nlink(inode); @@ -2519,6 +2560,7 @@ const struct inode_operations ext3_dir_inode_operations = { .mkdir = ext3_mkdir, .rmdir = ext3_rmdir, .mknod = ext3_mknod, + .tmpfile = ext3_tmpfile, .rename = ext3_rename, .setattr = ext3_setattr, #ifdef CONFIG_EXT3_FS_XATTR diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index d0f13eada0ed..58339393fa6e 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -682,11 +682,15 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb) static inline int test_root(ext4_group_t a, int b) { - int num = b; - - while (a > num) - num *= b; - return num == a; + while (1) { + if (a < b) + return 0; + if (a == b) + return 1; + if ((a % b) != 0) + return 0; + a = a / b; + } } static int ext4_group_sparse(ext4_group_t group) diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index f8d56e4254e0..3c7d288ae94c 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -29,8 +29,7 @@ #include "ext4.h" #include "xattr.h" -static int ext4_dx_readdir(struct file *filp, - void *dirent, filldir_t filldir); +static int ext4_dx_readdir(struct file *, struct dir_context *); /** * Check if the given dir-inode refers to an htree-indexed directory @@ -103,60 +102,56 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, return 1; } -static int ext4_readdir(struct file *filp, - void *dirent, filldir_t filldir) +static int ext4_readdir(struct file *file, struct dir_context *ctx) { - int error = 0; unsigned int offset; int i, stored; struct ext4_dir_entry_2 *de; int err; - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; - int ret = 0; 
int dir_has_error = 0; if (is_dx_dir(inode)) { - err = ext4_dx_readdir(filp, dirent, filldir); + err = ext4_dx_readdir(file, ctx); if (err != ERR_BAD_DX_DIR) { - ret = err; - goto out; + return err; } /* * We don't set the inode dirty flag since it's not * critical that it get flushed back to the disk. */ - ext4_clear_inode_flag(file_inode(filp), + ext4_clear_inode_flag(file_inode(file), EXT4_INODE_INDEX); } if (ext4_has_inline_data(inode)) { int has_inline_data = 1; - ret = ext4_read_inline_dir(filp, dirent, filldir, + int ret = ext4_read_inline_dir(file, ctx, &has_inline_data); if (has_inline_data) return ret; } stored = 0; - offset = filp->f_pos & (sb->s_blocksize - 1); + offset = ctx->pos & (sb->s_blocksize - 1); - while (!error && !stored && filp->f_pos < inode->i_size) { + while (ctx->pos < inode->i_size) { struct ext4_map_blocks map; struct buffer_head *bh = NULL; - map.m_lblk = filp->f_pos >> EXT4_BLOCK_SIZE_BITS(sb); + map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb); map.m_len = 1; err = ext4_map_blocks(NULL, inode, &map, 0); if (err > 0) { pgoff_t index = map.m_pblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits); - if (!ra_has_index(&filp->f_ra, index)) + if (!ra_has_index(&file->f_ra, index)) page_cache_sync_readahead( sb->s_bdev->bd_inode->i_mapping, - &filp->f_ra, filp, + &file->f_ra, file, index, 1); - filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; + file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; bh = ext4_bread(NULL, inode, map.m_lblk, 0, &err); } @@ -166,16 +161,16 @@ static int ext4_readdir(struct file *filp, */ if (!bh) { if (!dir_has_error) { - EXT4_ERROR_FILE(filp, 0, + EXT4_ERROR_FILE(file, 0, "directory contains a " "hole at offset %llu", - (unsigned long long) filp->f_pos); + (unsigned long long) ctx->pos); dir_has_error = 1; } /* corrupt size? Maybe no more blocks to read */ - if (filp->f_pos > inode->i_blocks << 9) + if (ctx->pos > inode->i_blocks << 9) break; - filp->f_pos += sb->s_blocksize - offset; + ctx->pos += sb->s_blocksize - offset; continue; } @@ -183,21 +178,20 @@ static int ext4_readdir(struct file *filp, if (!buffer_verified(bh) && !ext4_dirent_csum_verify(inode, (struct ext4_dir_entry *)bh->b_data)) { - EXT4_ERROR_FILE(filp, 0, "directory fails checksum " + EXT4_ERROR_FILE(file, 0, "directory fails checksum " "at offset %llu", - (unsigned long long)filp->f_pos); - filp->f_pos += sb->s_blocksize - offset; + (unsigned long long)ctx->pos); + ctx->pos += sb->s_blocksize - offset; brelse(bh); continue; } set_buffer_verified(bh); -revalidate: /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid * dirent right now. Scan from the start of the block * to make sure. 
*/ - if (filp->f_version != inode->i_version) { + if (file->f_version != inode->i_version) { for (i = 0; i < sb->s_blocksize && i < offset; ) { de = (struct ext4_dir_entry_2 *) (bh->b_data + i); @@ -214,57 +208,46 @@ revalidate: sb->s_blocksize); } offset = i; - filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1)) + ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1)) | offset; - filp->f_version = inode->i_version; + file->f_version = inode->i_version; } - while (!error && filp->f_pos < inode->i_size + while (ctx->pos < inode->i_size && offset < sb->s_blocksize) { de = (struct ext4_dir_entry_2 *) (bh->b_data + offset); - if (ext4_check_dir_entry(inode, filp, de, bh, + if (ext4_check_dir_entry(inode, file, de, bh, bh->b_data, bh->b_size, offset)) { /* - * On error, skip the f_pos to the next block + * On error, skip to the next block */ - filp->f_pos = (filp->f_pos | + ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1; - brelse(bh); - ret = stored; - goto out; + break; } offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); if (le32_to_cpu(de->inode)) { - /* We might block in the next section - * if the data destination is - * currently swapped out. So, use a - * version stamp to detect whether or - * not the directory has been modified - * during the copy operation. - */ - u64 version = filp->f_version; - - error = filldir(dirent, de->name, + if (!dir_emit(ctx, de->name, de->name_len, - filp->f_pos, le32_to_cpu(de->inode), - get_dtype(sb, de->file_type)); - if (error) - break; - if (version != filp->f_version) - goto revalidate; - stored++; + get_dtype(sb, de->file_type))) { + brelse(bh); + return 0; + } } - filp->f_pos += ext4_rec_len_from_disk(de->rec_len, + ctx->pos += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); } offset = 0; brelse(bh); + if (ctx->pos < inode->i_size) { + if (!dir_relax(inode)) + return 0; + } } -out: - return ret; + return 0; } static inline int is_32bit_api(void) @@ -492,16 +475,12 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, * for all entres on the fname linked list. (Normally there is only * one entry on the linked list, unless there are 62 bit hash collisions.) 
*/ -static int call_filldir(struct file *filp, void *dirent, - filldir_t filldir, struct fname *fname) +static int call_filldir(struct file *file, struct dir_context *ctx, + struct fname *fname) { - struct dir_private_info *info = filp->private_data; - loff_t curr_pos; - struct inode *inode = file_inode(filp); - struct super_block *sb; - int error; - - sb = inode->i_sb; + struct dir_private_info *info = file->private_data; + struct inode *inode = file_inode(file); + struct super_block *sb = inode->i_sb; if (!fname) { ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: comm %s: " @@ -509,47 +488,44 @@ static int call_filldir(struct file *filp, void *dirent, inode->i_ino, current->comm); return 0; } - curr_pos = hash2pos(filp, fname->hash, fname->minor_hash); + ctx->pos = hash2pos(file, fname->hash, fname->minor_hash); while (fname) { - error = filldir(dirent, fname->name, - fname->name_len, curr_pos, + if (!dir_emit(ctx, fname->name, + fname->name_len, fname->inode, - get_dtype(sb, fname->file_type)); - if (error) { - filp->f_pos = curr_pos; + get_dtype(sb, fname->file_type))) { info->extra_fname = fname; - return error; + return 1; } fname = fname->next; } return 0; } -static int ext4_dx_readdir(struct file *filp, - void *dirent, filldir_t filldir) +static int ext4_dx_readdir(struct file *file, struct dir_context *ctx) { - struct dir_private_info *info = filp->private_data; - struct inode *inode = file_inode(filp); + struct dir_private_info *info = file->private_data; + struct inode *inode = file_inode(file); struct fname *fname; int ret; if (!info) { - info = ext4_htree_create_dir_info(filp, filp->f_pos); + info = ext4_htree_create_dir_info(file, ctx->pos); if (!info) return -ENOMEM; - filp->private_data = info; + file->private_data = info; } - if (filp->f_pos == ext4_get_htree_eof(filp)) + if (ctx->pos == ext4_get_htree_eof(file)) return 0; /* EOF */ /* Some one has messed with f_pos; reset the world */ - if (info->last_pos != filp->f_pos) { + if (info->last_pos != ctx->pos) { free_rb_tree_fname(&info->root); info->curr_node = NULL; info->extra_fname = NULL; - info->curr_hash = pos2maj_hash(filp, filp->f_pos); - info->curr_minor_hash = pos2min_hash(filp, filp->f_pos); + info->curr_hash = pos2maj_hash(file, ctx->pos); + info->curr_minor_hash = pos2min_hash(file, ctx->pos); } /* @@ -557,7 +533,7 @@ static int ext4_dx_readdir(struct file *filp, * chain, return them first. */ if (info->extra_fname) { - if (call_filldir(filp, dirent, filldir, info->extra_fname)) + if (call_filldir(file, ctx, info->extra_fname)) goto finished; info->extra_fname = NULL; goto next_node; @@ -571,17 +547,17 @@ static int ext4_dx_readdir(struct file *filp, * cached entries. 
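/*
 * In an htree directory the position call_filldir() stores above is a hash
 * cookie, not a byte offset.  From the helpers referenced here (hash2pos,
 * pos2maj_hash, pos2min_hash; shape recalled, not shown in this diff):
 *
 *	pos = ((__u64)(major >> 1) << 32) | minor;	64-bit callers
 *	pos = major >> 1;				32-bit callers
 *
 * which is also why the EOF sentinel ext4_get_htree_eof() depends on the
 * caller's word size.
 */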
*/ if ((!info->curr_node) || - (filp->f_version != inode->i_version)) { + (file->f_version != inode->i_version)) { info->curr_node = NULL; free_rb_tree_fname(&info->root); - filp->f_version = inode->i_version; - ret = ext4_htree_fill_tree(filp, info->curr_hash, + file->f_version = inode->i_version; + ret = ext4_htree_fill_tree(file, info->curr_hash, info->curr_minor_hash, &info->next_hash); if (ret < 0) return ret; if (ret == 0) { - filp->f_pos = ext4_get_htree_eof(filp); + ctx->pos = ext4_get_htree_eof(file); break; } info->curr_node = rb_first(&info->root); @@ -590,7 +566,7 @@ static int ext4_dx_readdir(struct file *filp, fname = rb_entry(info->curr_node, struct fname, rb_hash); info->curr_hash = fname->hash; info->curr_minor_hash = fname->minor_hash; - if (call_filldir(filp, dirent, filldir, fname)) + if (call_filldir(file, ctx, fname)) break; next_node: info->curr_node = rb_next(info->curr_node); @@ -601,7 +577,7 @@ static int ext4_dx_readdir(struct file *filp, info->curr_minor_hash = fname->minor_hash; } else { if (info->next_hash == ~0) { - filp->f_pos = ext4_get_htree_eof(filp); + ctx->pos = ext4_get_htree_eof(file); break; } info->curr_hash = info->next_hash; @@ -609,7 +585,7 @@ static int ext4_dx_readdir(struct file *filp, } } finished: - info->last_pos = filp->f_pos; + info->last_pos = ctx->pos; return 0; } @@ -624,7 +600,7 @@ static int ext4_release_dir(struct inode *inode, struct file *filp) const struct file_operations ext4_dir_operations = { .llseek = ext4_dir_llseek, .read = generic_read_dir, - .readdir = ext4_readdir, + .iterate = ext4_readdir, .unlocked_ioctl = ext4_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext4_compat_ioctl, diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 5aae3d12d400..b577e45425b0 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -177,38 +177,28 @@ struct ext4_map_blocks { }; /* - * For delayed allocation tracking - */ -struct mpage_da_data { - struct inode *inode; - sector_t b_blocknr; /* start block number of extent */ - size_t b_size; /* size of extent */ - unsigned long b_state; /* state of the extent */ - unsigned long first_page, next_page; /* extent of pages */ - struct writeback_control *wbc; - int io_done; - int pages_written; - int retval; -}; - -/* * Flags for ext4_io_end->flags */ #define EXT4_IO_END_UNWRITTEN 0x0001 -#define EXT4_IO_END_ERROR 0x0002 -#define EXT4_IO_END_DIRECT 0x0004 +#define EXT4_IO_END_DIRECT 0x0002 /* - * For converting uninitialized extents on a work queue. + * For converting uninitialized extents on a work queue. 'handle' is used for + * buffered writeback. 
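/*
 * The split added just below (i_rsv_conversion_list vs
 * i_unrsv_conversion_list, one workqueue each) encodes roughly this
 * dispatch rule (a sketch; the surrounding variables are assumed):
 */
if (io_end->handle)	/* buffered writeback: transaction reserved up front */
	queue_work(sbi->rsv_conversion_wq, &ei->i_rsv_conversion_work);
else			/* direct IO: convert in freshly started transactions */
	queue_work(sbi->unrsv_conversion_wq, &ei->i_unrsv_conversion_work);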
*/ typedef struct ext4_io_end { struct list_head list; /* per-file finished IO list */ + handle_t *handle; /* handle reserved for extent + * conversion */ struct inode *inode; /* file being written to */ + struct bio *bio; /* Linked list of completed + * bios covering the extent */ unsigned int flag; /* unwritten or not */ loff_t offset; /* offset in the file */ ssize_t size; /* size of the extent */ struct kiocb *iocb; /* iocb struct for AIO */ int result; /* error value for AIO */ + atomic_t count; /* reference counter */ } ext4_io_end_t; struct ext4_io_submit { @@ -581,11 +571,6 @@ enum { #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020 /* - * Flags used by ext4_discard_partial_page_buffers - */ -#define EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED 0x0001 - -/* * ioctl commands */ #define EXT4_IOC_GETFLAGS FS_IOC_GETFLAGS @@ -879,6 +864,7 @@ struct ext4_inode_info { rwlock_t i_es_lock; struct list_head i_es_lru; unsigned int i_es_lru_nr; /* protected by i_es_lock */ + unsigned long i_touch_when; /* jiffies of last accessing */ /* ialloc */ ext4_group_t i_last_alloc_group; @@ -903,12 +889,22 @@ struct ext4_inode_info { qsize_t i_reserved_quota; #endif - /* completed IOs that might need unwritten extents handling */ - struct list_head i_completed_io_list; + /* Lock protecting lists below */ spinlock_t i_completed_io_lock; + /* + * Completed IOs that need unwritten extents handling and have + * transaction reserved + */ + struct list_head i_rsv_conversion_list; + /* + * Completed IOs that need unwritten extents handling and don't have + * transaction reserved + */ + struct list_head i_unrsv_conversion_list; atomic_t i_ioend_count; /* Number of outstanding io_end structs */ atomic_t i_unwritten; /* Nr. of inflight conversions pending */ - struct work_struct i_unwritten_work; /* deferred extent conversion */ + struct work_struct i_rsv_conversion_work; + struct work_struct i_unrsv_conversion_work; spinlock_t i_block_reservation_lock; @@ -1245,7 +1241,6 @@ struct ext4_sb_info { unsigned int s_mb_stats; unsigned int s_mb_order2_reqs; unsigned int s_mb_group_prealloc; - unsigned int s_max_writeback_mb_bump; unsigned int s_max_dir_size_kb; /* where last allocation was done - for stream allocation */ unsigned long s_mb_last_group; @@ -1281,8 +1276,10 @@ struct ext4_sb_info { struct flex_groups *s_flex_groups; ext4_group_t s_flex_groups_allocated; - /* workqueue for dio unwritten */ - struct workqueue_struct *dio_unwritten_wq; + /* workqueue for unreserved extent convertions (dio) */ + struct workqueue_struct *unrsv_conversion_wq; + /* workqueue for reserved extent conversions (buffered io) */ + struct workqueue_struct *rsv_conversion_wq; /* timer for periodic error stats printing */ struct timer_list s_err_report; @@ -1307,6 +1304,7 @@ struct ext4_sb_info { /* Reclaim extents from extent status tree */ struct shrinker s_es_shrinker; struct list_head s_es_lru; + unsigned long s_es_last_sorted; struct percpu_counter s_extent_cache_cnt; spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp; }; @@ -1342,6 +1340,9 @@ static inline void ext4_set_io_unwritten_flag(struct inode *inode, struct ext4_io_end *io_end) { if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { + /* Writeback has to have coversion transaction reserved */ + WARN_ON(EXT4_SB(inode->i_sb)->s_journal && !io_end->handle && + !(io_end->flag & EXT4_IO_END_DIRECT)); io_end->flag |= EXT4_IO_END_UNWRITTEN; atomic_inc(&EXT4_I(inode)->i_unwritten); } @@ -1999,7 +2000,6 @@ static inline unsigned char get_dtype(struct super_block *sb, int filetype) /* 
fsync.c */ extern int ext4_sync_file(struct file *, loff_t, loff_t, int); -extern int ext4_flush_unwritten_io(struct inode *); /* hash.c */ extern int ext4fs_dirhash(const char *name, int len, struct @@ -2088,7 +2088,7 @@ extern int ext4_change_inode_journal_flag(struct inode *, int); extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); extern int ext4_can_truncate(struct inode *inode); extern void ext4_truncate(struct inode *); -extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length); +extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length); extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks); extern void ext4_set_inode_flags(struct inode *); extern void ext4_get_inode_flags(struct ext4_inode_info *); @@ -2096,9 +2096,12 @@ extern int ext4_alloc_da_blocks(struct inode *inode); extern void ext4_set_aops(struct inode *inode); extern int ext4_writepage_trans_blocks(struct inode *); extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); -extern int ext4_discard_partial_page_buffers(handle_t *handle, - struct address_space *mapping, loff_t from, - loff_t length, int flags); +extern int ext4_block_truncate_page(handle_t *handle, + struct address_space *mapping, loff_t from); +extern int ext4_block_zero_page_range(handle_t *handle, + struct address_space *mapping, loff_t from, loff_t length); +extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, + loff_t lstart, loff_t lend); extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); extern qsize_t *ext4_get_reserved_space(struct inode *inode); extern void ext4_da_update_reserve_space(struct inode *inode, @@ -2111,7 +2114,7 @@ extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs); extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock); -extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk); +extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks); extern void ext4_ind_truncate(handle_t *, struct inode *inode); extern int ext4_free_hole_blocks(handle_t *handle, struct inode *inode, ext4_lblk_t first, ext4_lblk_t stop); @@ -2166,42 +2169,96 @@ extern int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup); extern const char *ext4_decode_error(struct super_block *sb, int errno, char nbuf[16]); + extern __printf(4, 5) void __ext4_error(struct super_block *, const char *, unsigned int, const char *, ...); -#define ext4_error(sb, message...) __ext4_error(sb, __func__, \ - __LINE__, ## message) extern __printf(5, 6) -void ext4_error_inode(struct inode *, const char *, unsigned int, ext4_fsblk_t, +void __ext4_error_inode(struct inode *, const char *, unsigned int, ext4_fsblk_t, const char *, ...); extern __printf(5, 6) -void ext4_error_file(struct file *, const char *, unsigned int, ext4_fsblk_t, +void __ext4_error_file(struct file *, const char *, unsigned int, ext4_fsblk_t, const char *, ...); extern void __ext4_std_error(struct super_block *, const char *, unsigned int, int); extern __printf(4, 5) void __ext4_abort(struct super_block *, const char *, unsigned int, const char *, ...); -#define ext4_abort(sb, message...) __ext4_abort(sb, __func__, \ - __LINE__, ## message) extern __printf(4, 5) void __ext4_warning(struct super_block *, const char *, unsigned int, const char *, ...); -#define ext4_warning(sb, message...) 
__ext4_warning(sb, __func__, \ - __LINE__, ## message) extern __printf(3, 4) -void ext4_msg(struct super_block *, const char *, const char *, ...); +void __ext4_msg(struct super_block *, const char *, const char *, ...); extern void __dump_mmp_msg(struct super_block *, struct mmp_struct *mmp, const char *, unsigned int, const char *); -#define dump_mmp_msg(sb, mmp, msg) __dump_mmp_msg(sb, mmp, __func__, \ - __LINE__, msg) extern __printf(7, 8) void __ext4_grp_locked_error(const char *, unsigned int, struct super_block *, ext4_group_t, unsigned long, ext4_fsblk_t, const char *, ...); -#define ext4_grp_locked_error(sb, grp, message...) \ - __ext4_grp_locked_error(__func__, __LINE__, (sb), (grp), ## message) + +#ifdef CONFIG_PRINTK + +#define ext4_error_inode(inode, func, line, block, fmt, ...) \ + __ext4_error_inode(inode, func, line, block, fmt, ##__VA_ARGS__) +#define ext4_error_file(file, func, line, block, fmt, ...) \ + __ext4_error_file(file, func, line, block, fmt, ##__VA_ARGS__) +#define ext4_error(sb, fmt, ...) \ + __ext4_error(sb, __func__, __LINE__, fmt, ##__VA_ARGS__) +#define ext4_abort(sb, fmt, ...) \ + __ext4_abort(sb, __func__, __LINE__, fmt, ##__VA_ARGS__) +#define ext4_warning(sb, fmt, ...) \ + __ext4_warning(sb, __func__, __LINE__, fmt, ##__VA_ARGS__) +#define ext4_msg(sb, level, fmt, ...) \ + __ext4_msg(sb, level, fmt, ##__VA_ARGS__) +#define dump_mmp_msg(sb, mmp, msg) \ + __dump_mmp_msg(sb, mmp, __func__, __LINE__, msg) +#define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...) \ + __ext4_grp_locked_error(__func__, __LINE__, sb, grp, ino, block, \ + fmt, ##__VA_ARGS__) + +#else + +#define ext4_error_inode(inode, func, line, block, fmt, ...) \ +do { \ + no_printk(fmt, ##__VA_ARGS__); \ + __ext4_error_inode(inode, "", 0, block, " "); \ +} while (0) +#define ext4_error_file(file, func, line, block, fmt, ...) \ +do { \ + no_printk(fmt, ##__VA_ARGS__); \ + __ext4_error_file(file, "", 0, block, " "); \ +} while (0) +#define ext4_error(sb, fmt, ...) \ +do { \ + no_printk(fmt, ##__VA_ARGS__); \ + __ext4_error(sb, "", 0, " "); \ +} while (0) +#define ext4_abort(sb, fmt, ...) \ +do { \ + no_printk(fmt, ##__VA_ARGS__); \ + __ext4_abort(sb, "", 0, " "); \ +} while (0) +#define ext4_warning(sb, fmt, ...) \ +do { \ + no_printk(fmt, ##__VA_ARGS__); \ + __ext4_warning(sb, "", 0, " "); \ +} while (0) +#define ext4_msg(sb, level, fmt, ...) \ +do { \ + no_printk(fmt, ##__VA_ARGS__); \ + __ext4_msg(sb, "", " "); \ +} while (0) +#define dump_mmp_msg(sb, mmp, msg) \ + __dump_mmp_msg(sb, mmp, "", 0, "") +#define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...) 
\ +do { \ + no_printk(fmt, ##__VA_ARGS__); \ + __ext4_grp_locked_error("", 0, sb, grp, ino, block, " "); \ +} while (0) + +#endif + extern void ext4_update_dynamic_rev(struct super_block *sb); extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb, __u32 compat); @@ -2312,6 +2369,7 @@ struct ext4_group_info *ext4_get_group_info(struct super_block *sb, { struct ext4_group_info ***grp_info; long indexv, indexh; + BUG_ON(group >= EXT4_SB(sb)->s_groups_count); grp_info = EXT4_SB(sb)->s_group_info; indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb)); indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); @@ -2515,7 +2573,7 @@ extern int ext4_try_create_inline_dir(handle_t *handle, struct inode *parent, struct inode *inode); extern int ext4_read_inline_dir(struct file *filp, - void *dirent, filldir_t filldir, + struct dir_context *ctx, int *has_inline_data); extern int htree_inlinedir_to_tree(struct file *dir_file, struct inode *dir, ext4_lblk_t block, @@ -2598,8 +2656,7 @@ struct ext4_extent; extern int ext4_ext_tree_init(handle_t *handle, struct inode *); extern int ext4_ext_writepage_trans_blocks(struct inode *, int); -extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, - int chunk); +extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents); extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags); extern void ext4_ext_truncate(handle_t *, struct inode *); @@ -2609,8 +2666,8 @@ extern void ext4_ext_init(struct super_block *); extern void ext4_ext_release(struct super_block *); extern long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len); -extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, - ssize_t len); +extern int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, + loff_t offset, ssize_t len); extern int ext4_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags); extern int ext4_ext_calc_metadata_amount(struct inode *inode, @@ -2650,12 +2707,15 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp, /* page-io.c */ extern int __init ext4_init_pageio(void); -extern void ext4_add_complete_io(ext4_io_end_t *io_end); extern void ext4_exit_pageio(void); -extern void ext4_ioend_shutdown(struct inode *); -extern void ext4_free_io_end(ext4_io_end_t *io); extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); -extern void ext4_end_io_work(struct work_struct *work); +extern ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end); +extern int ext4_put_io_end(ext4_io_end_t *io_end); +extern void ext4_put_io_end_defer(ext4_io_end_t *io_end); +extern void ext4_io_submit_init(struct ext4_io_submit *io, + struct writeback_control *wbc); +extern void ext4_end_io_rsv_work(struct work_struct *work); +extern void ext4_end_io_unrsv_work(struct work_struct *work); extern void ext4_io_submit(struct ext4_io_submit *io); extern int ext4_bio_write_page(struct ext4_io_submit *io, struct page *page, @@ -2668,20 +2728,17 @@ extern void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp); extern int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp); -/* BH_Uninit flag: blocks are allocated but uninitialized on disk */ +/* + * Note that these flags will never ever appear in a buffer_head's state flag. + * See EXT4_MAP_... to see where this is used. 
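/*
 * Note on the !CONFIG_PRINTK stubs for the ext4 message macros above: each
 * still expands the format through no_printk(), which compiles to nothing
 * but keeps printf-style type checking of the arguments, so a mismatched
 * format specifier is caught even on kernels built without printk.  In
 * essence:
 *
 *	no_printk(fmt, ##__VA_ARGS__);	behaves as "if (0) printk(...);"
 */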
+ */ enum ext4_state_bits { BH_Uninit /* blocks are allocated but uninitialized on disk */ - = BH_JBDPrivateStart, + = BH_JBDPrivateStart, BH_AllocFromCluster, /* allocated blocks were part of already - * allocated cluster. Note that this flag will - * never, ever appear in a buffer_head's state - * flag. See EXT4_MAP_FROM_CLUSTER to see where - * this is used. */ + * allocated cluster. */ }; -BUFFER_FNS(Uninit, uninit) -TAS_BUFFER_FNS(Uninit, uninit) - /* * Add new method to test whether block and inode bitmaps are properly * initialized. With uninit_bg reading the block from disk is not enough diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 451eb4045330..72a3600aedbd 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c @@ -38,31 +38,43 @@ static void ext4_put_nojournal(handle_t *handle) /* * Wrappers for jbd2_journal_start/end. */ -handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line, - int type, int nblocks) +static int ext4_journal_check_start(struct super_block *sb) { journal_t *journal; might_sleep(); - - trace_ext4_journal_start(sb, nblocks, _RET_IP_); if (sb->s_flags & MS_RDONLY) - return ERR_PTR(-EROFS); - + return -EROFS; WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE); journal = EXT4_SB(sb)->s_journal; - if (!journal) - return ext4_get_nojournal(); /* * Special case here: if the journal has aborted behind our * backs (eg. EIO in the commit thread), then we still need to * take the FS itself readonly cleanly. */ - if (is_journal_aborted(journal)) { + if (journal && is_journal_aborted(journal)) { ext4_abort(sb, "Detected aborted journal"); - return ERR_PTR(-EROFS); + return -EROFS; } - return jbd2__journal_start(journal, nblocks, GFP_NOFS, type, line); + return 0; +} + +handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line, + int type, int blocks, int rsv_blocks) +{ + journal_t *journal; + int err; + + trace_ext4_journal_start(sb, blocks, rsv_blocks, _RET_IP_); + err = ext4_journal_check_start(sb); + if (err < 0) + return ERR_PTR(err); + + journal = EXT4_SB(sb)->s_journal; + if (!journal) + return ext4_get_nojournal(); + return jbd2__journal_start(journal, blocks, rsv_blocks, GFP_NOFS, + type, line); } int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle) @@ -86,6 +98,30 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle) return err; } +handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line, + int type) +{ + struct super_block *sb; + int err; + + if (!ext4_handle_valid(handle)) + return ext4_get_nojournal(); + + sb = handle->h_journal->j_private; + trace_ext4_journal_start_reserved(sb, handle->h_buffer_credits, + _RET_IP_); + err = ext4_journal_check_start(sb); + if (err < 0) { + jbd2_journal_free_reserved(handle); + return ERR_PTR(err); + } + + err = jbd2_journal_start_reserved(handle, type, line); + if (err < 0) + return ERR_PTR(err); + return handle; +} + void ext4_journal_abort_handle(const char *caller, unsigned int line, const char *err_fn, struct buffer_head *bh, handle_t *handle, int err) diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index c8c6885406db..2877258d9497 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h @@ -134,7 +134,8 @@ static inline int ext4_jbd2_credits_xattr(struct inode *inode) #define EXT4_HT_MIGRATE 8 #define EXT4_HT_MOVE_EXTENTS 9 #define EXT4_HT_XATTR 10 -#define EXT4_HT_MAX 11 +#define EXT4_HT_EXT_CONVERT 11 +#define EXT4_HT_MAX 12 /** * struct ext4_journal_cb_entry - Base structure for 
callback information. @@ -265,7 +266,7 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line, __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb)) handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line, - int type, int nblocks); + int type, int blocks, int rsv_blocks); int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle); #define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096) @@ -300,21 +301,37 @@ static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed) } #define ext4_journal_start_sb(sb, type, nblocks) \ - __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks)) + __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks), 0) #define ext4_journal_start(inode, type, nblocks) \ - __ext4_journal_start((inode), __LINE__, (type), (nblocks)) + __ext4_journal_start((inode), __LINE__, (type), (nblocks), 0) + +#define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks) \ + __ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks)) static inline handle_t *__ext4_journal_start(struct inode *inode, unsigned int line, int type, - int nblocks) + int blocks, int rsv_blocks) { - return __ext4_journal_start_sb(inode->i_sb, line, type, nblocks); + return __ext4_journal_start_sb(inode->i_sb, line, type, blocks, + rsv_blocks); } #define ext4_journal_stop(handle) \ __ext4_journal_stop(__func__, __LINE__, (handle)) +#define ext4_journal_start_reserved(handle, type) \ + __ext4_journal_start_reserved((handle), __LINE__, (type)) + +handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line, + int type); + +static inline void ext4_journal_free_reserved(handle_t *handle) +{ + if (ext4_handle_valid(handle)) + jbd2_journal_free_reserved(handle); +} + static inline handle_t *ext4_journal_current_handle(void) { return journal_current_handle(); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index bc0f1910b9cf..7097b0f680e6 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -2125,7 +2125,8 @@ static int ext4_fill_fiemap_extents(struct inode *inode, next_del = ext4_find_delayed_extent(inode, &es); if (!exists && next_del) { exists = 1; - flags |= FIEMAP_EXTENT_DELALLOC; + flags |= (FIEMAP_EXTENT_DELALLOC | + FIEMAP_EXTENT_UNKNOWN); } up_read(&EXT4_I(inode)->i_data_sem); @@ -2328,17 +2329,15 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, } /* - * How many index/leaf blocks need to change/allocate to modify nrblocks? + * How many index/leaf blocks need to change/allocate to add @extents extents? * - * if nrblocks are fit in a single extent (chunk flag is 1), then - * in the worse case, each tree level index/leaf need to be changed - * if the tree split due to insert a new extent, then the old tree - * index/leaf need to be updated too + * If we add a single extent, then in the worse case, each tree level + * index/leaf need to be changed in case of the tree split. * - * If the nrblocks are discontiguous, they could cause - * the whole tree split more than once, but this is really rare. + * If more extents are inserted, they could cause the whole tree split more + * than once, but this is really rare. 
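/*
 * Usage pattern for the reserved-handle API declared above (a sketch;
 * EXT4_HT_WRITE_PAGE and the variable names are assumptions): writeback
 * reserves rsv_blocks alongside its own credits, the reservation ends up
 * in io_end->handle, and the conversion worker later turns it into a
 * running handle that cannot fail for lack of journal space:
 */
handle = ext4_journal_start_with_reserve(inode, EXT4_HT_WRITE_PAGE,
					 needed_blocks, rsv_blocks);
/* ... later, from the extent-conversion workqueue: */
handle = ext4_journal_start_reserved(io_end->handle, EXT4_HT_EXT_CONVERT);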
*/ -int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) +int ext4_ext_index_trans_blocks(struct inode *inode, int extents) { int index; int depth; @@ -2349,7 +2348,7 @@ int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) depth = ext_depth(inode); - if (chunk) + if (extents <= 1) index = depth * 2; else index = depth * 3; @@ -2357,20 +2356,24 @@ int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) return index; } +static inline int get_default_free_blocks_flags(struct inode *inode) +{ + if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) + return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; + else if (ext4_should_journal_data(inode)) + return EXT4_FREE_BLOCKS_FORGET; + return 0; +} + static int ext4_remove_blocks(handle_t *handle, struct inode *inode, struct ext4_extent *ex, - ext4_fsblk_t *partial_cluster, + long long *partial_cluster, ext4_lblk_t from, ext4_lblk_t to) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); unsigned short ee_len = ext4_ext_get_actual_len(ex); ext4_fsblk_t pblk; - int flags = 0; - - if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) - flags |= EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; - else if (ext4_should_journal_data(inode)) - flags |= EXT4_FREE_BLOCKS_FORGET; + int flags = get_default_free_blocks_flags(inode); /* * For bigalloc file systems, we never free a partial cluster @@ -2388,7 +2391,8 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, * partial cluster here. */ pblk = ext4_ext_pblock(ex) + ee_len - 1; - if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) { + if ((*partial_cluster > 0) && + (EXT4_B2C(sbi, pblk) != *partial_cluster)) { ext4_free_blocks(handle, inode, NULL, EXT4_C2B(sbi, *partial_cluster), sbi->s_cluster_ratio, flags); @@ -2414,41 +2418,46 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { /* tail removal */ ext4_lblk_t num; + unsigned int unaligned; num = le32_to_cpu(ex->ee_block) + ee_len - from; pblk = ext4_ext_pblock(ex) + ee_len - num; - ext_debug("free last %u blocks starting %llu\n", num, pblk); + /* + * Usually we want to free partial cluster at the end of the + * extent, except for the situation when the cluster is still + * used by any other extent (partial_cluster is negative). + */ + if (*partial_cluster < 0 && + -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1)) + flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; + + ext_debug("free last %u blocks starting %llu partial %lld\n", + num, pblk, *partial_cluster); ext4_free_blocks(handle, inode, NULL, pblk, num, flags); /* * If the block range to be freed didn't start at the * beginning of a cluster, and we removed the entire - * extent, save the partial cluster here, since we - * might need to delete if we determine that the - * truncate operation has removed all of the blocks in - * the cluster. + * extent and the cluster is not used by any other extent, + * save the partial cluster here, since we might need to + * delete if we determine that the truncate operation has + * removed all of the blocks in the cluster. + * + * On the other hand, if we did not manage to free the whole + * extent, we have to mark the cluster as used (store negative + * cluster number in partial_cluster). 
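/*
 * partial_cluster becomes a signed long long in the hunks above, and its
 * sign now carries state.  The convention these hunks implement:
 *
 *	partial_cluster == 0	nothing pending
 *	partial_cluster  > 0	cluster that may still have to be freed
 *	partial_cluster  < 0	-(cluster) is known in use: never free it
 *
 * e.g. when skipping an extent that is not aligned to a cluster boundary:
 */
pblk = ext4_ext_pblock(ex);
if (pblk & (sbi->s_cluster_ratio - 1))
	partial_cluster = -(long long)EXT4_B2C(sbi, pblk);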
*/ - if (pblk & (sbi->s_cluster_ratio - 1) && - (ee_len == num)) + unaligned = pblk & (sbi->s_cluster_ratio - 1); + if (unaligned && (ee_len == num) && + (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) *partial_cluster = EXT4_B2C(sbi, pblk); - else + else if (unaligned) + *partial_cluster = -((long long)EXT4_B2C(sbi, pblk)); + else if (*partial_cluster > 0) *partial_cluster = 0; - } else if (from == le32_to_cpu(ex->ee_block) - && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { - /* head removal */ - ext4_lblk_t num; - ext4_fsblk_t start; - - num = to - from; - start = ext4_ext_pblock(ex); - - ext_debug("free first %u blocks starting %llu\n", num, start); - ext4_free_blocks(handle, inode, NULL, start, num, flags); - - } else { - printk(KERN_INFO "strange request: removal(2) " - "%u-%u from %u:%u\n", - from, to, le32_to_cpu(ex->ee_block), ee_len); - } + } else + ext4_error(sbi->s_sb, "strange request: removal(2) " + "%u-%u from %u:%u\n", + from, to, le32_to_cpu(ex->ee_block), ee_len); return 0; } @@ -2461,12 +2470,16 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, * @handle: The journal handle * @inode: The files inode * @path: The path to the leaf + * @partial_cluster: The cluster which we'll have to free if all extents + * has been released from it. It gets negative in case + * that the cluster is still used. * @start: The first block to remove * @end: The last block to remove */ static int ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, - struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster, + struct ext4_ext_path *path, + long long *partial_cluster, ext4_lblk_t start, ext4_lblk_t end) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); @@ -2479,6 +2492,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, unsigned short ex_ee_len; unsigned uninitialized = 0; struct ext4_extent *ex; + ext4_fsblk_t pblk; /* the header must be checked already in ext4_ext_remove_space() */ ext_debug("truncate since %u in leaf to %u\n", start, end); @@ -2490,7 +2504,9 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, return -EIO; } /* find where to start removing */ - ex = EXT_LAST_EXTENT(eh); + ex = path[depth].p_ext; + if (!ex) + ex = EXT_LAST_EXTENT(eh); ex_ee_block = le32_to_cpu(ex->ee_block); ex_ee_len = ext4_ext_get_actual_len(ex); @@ -2517,6 +2533,16 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, /* If this extent is beyond the end of the hole, skip it */ if (end < ex_ee_block) { + /* + * We're going to skip this extent and move to another, + * so if this extent is not cluster aligned we have + * to mark the current cluster as used to avoid + * accidentally freeing it later on + */ + pblk = ext4_ext_pblock(ex); + if (pblk & (sbi->s_cluster_ratio - 1)) + *partial_cluster = + -((long long)EXT4_B2C(sbi, pblk)); ex--; ex_ee_block = le32_to_cpu(ex->ee_block); ex_ee_len = ext4_ext_get_actual_len(ex); @@ -2592,7 +2618,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, sizeof(struct ext4_extent)); } le16_add_cpu(&eh->eh_entries, -1); - } else + } else if (*partial_cluster > 0) *partial_cluster = 0; err = ext4_ext_dirty(handle, inode, path + depth); @@ -2610,17 +2636,13 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, err = ext4_ext_correct_indexes(handle, inode, path); /* - * If there is still a entry in the leaf node, check to see if - * it references the partial cluster. This is the only place - * where it could; if it doesn't, we can free the cluster. 
+ * Free the partial cluster only if the current extent does not + * reference it. Otherwise we might free used cluster. */ - if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) && + if (*partial_cluster > 0 && (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) != *partial_cluster)) { - int flags = EXT4_FREE_BLOCKS_FORGET; - - if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) - flags |= EXT4_FREE_BLOCKS_METADATA; + int flags = get_default_free_blocks_flags(inode); ext4_free_blocks(handle, inode, NULL, EXT4_C2B(sbi, *partial_cluster), @@ -2664,7 +2686,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, struct super_block *sb = inode->i_sb; int depth = ext_depth(inode); struct ext4_ext_path *path = NULL; - ext4_fsblk_t partial_cluster = 0; + long long partial_cluster = 0; handle_t *handle; int i = 0, err = 0; @@ -2676,7 +2698,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, return PTR_ERR(handle); again: - trace_ext4_ext_remove_space(inode, start, depth); + trace_ext4_ext_remove_space(inode, start, end, depth); /* * Check if we are removing extents inside the extent tree. If that @@ -2844,17 +2866,14 @@ again: } } - trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster, - path->p_hdr->eh_entries); + trace_ext4_ext_remove_space_done(inode, start, end, depth, + partial_cluster, path->p_hdr->eh_entries); /* If we still have something in the partial cluster and we have removed * even the first extent, then we should free the blocks in the partial * cluster as well. */ - if (partial_cluster && path->p_hdr->eh_entries == 0) { - int flags = EXT4_FREE_BLOCKS_FORGET; - - if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) - flags |= EXT4_FREE_BLOCKS_METADATA; + if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) { + int flags = get_default_free_blocks_flags(inode); ext4_free_blocks(handle, inode, NULL, EXT4_C2B(EXT4_SB(sb), partial_cluster), @@ -4363,7 +4382,7 @@ out2: } out3: - trace_ext4_ext_map_blocks_exit(inode, map, err ? err : allocated); + trace_ext4_ext_map_blocks_exit(inode, flags, map, err ? err : allocated); return err ? err : allocated; } @@ -4446,7 +4465,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) return -EOPNOTSUPP; if (mode & FALLOC_FL_PUNCH_HOLE) - return ext4_punch_hole(file, offset, len); + return ext4_punch_hole(inode, offset, len); ret = ext4_convert_inline_data(inode); if (ret) @@ -4548,10 +4567,9 @@ retry: * function, to convert the fallocated extents after IO is completed. * Returns 0 on success. */ -int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, - ssize_t len) +int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, + loff_t offset, ssize_t len) { - handle_t *handle; unsigned int max_blocks; int ret = 0; int ret2 = 0; @@ -4566,16 +4584,32 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - map.m_lblk); /* - * credits to insert 1 extent into extent tree + * This is somewhat ugly but the idea is clear: When transaction is + * reserved, everything goes into it. Otherwise we rather start several + * smaller transactions for conversion of each extent separately. 
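/*
 * So after this change ext4_convert_unwritten_extents() runs in one of two
 * modes (sketch): with a reserved handle the whole conversion rides in it,
 * credits stays 0 and no transaction is started per extent; without one,
 * each chunk starts its own small transaction.  The point of the
 * reservation is that conversion at IO-completion time can then neither
 * block on nor be failed by a full journal.
 */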
*/ - credits = ext4_chunk_trans_blocks(inode, max_blocks); + if (handle) { + handle = ext4_journal_start_reserved(handle, + EXT4_HT_EXT_CONVERT); + if (IS_ERR(handle)) + return PTR_ERR(handle); + credits = 0; + } else { + /* + * credits to insert 1 extent into extent tree + */ + credits = ext4_chunk_trans_blocks(inode, max_blocks); + } while (ret >= 0 && ret < max_blocks) { map.m_lblk += ret; map.m_len = (max_blocks -= ret); - handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - break; + if (credits) { + handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, + credits); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + break; + } } ret = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_IO_CONVERT_EXT); @@ -4586,10 +4620,13 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, inode->i_ino, map.m_lblk, map.m_len, ret); ext4_mark_inode_dirty(handle, inode); - ret2 = ext4_journal_stop(handle); - if (ret <= 0 || ret2 ) + if (credits) + ret2 = ext4_journal_stop(handle); + if (ret <= 0 || ret2) break; } + if (!credits) + ret2 = ext4_journal_stop(handle); return ret > 0 ? ret2 : ret; } @@ -4659,7 +4696,7 @@ static int ext4_xattr_fiemap(struct inode *inode, error = ext4_get_inode_loc(inode, &iloc); if (error) return error; - physical = iloc.bh->b_blocknr << blockbits; + physical = (__u64)iloc.bh->b_blocknr << blockbits; offset = EXT4_GOOD_OLD_INODE_SIZE + EXT4_I(inode)->i_extra_isize; physical += offset; @@ -4667,7 +4704,7 @@ static int ext4_xattr_fiemap(struct inode *inode, flags |= FIEMAP_EXTENT_DATA_INLINE; brelse(iloc.bh); } else { /* external block */ - physical = EXT4_I(inode)->i_file_acl << blockbits; + physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; length = inode->i_sb->s_blocksize; } diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index e6941e622d31..ee018d5f397e 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -10,6 +10,7 @@ * Ext4 extents status tree core functions. 
*/ #include <linux/rbtree.h> +#include <linux/list_sort.h> #include "ext4.h" #include "extents_status.h" #include "ext4_extents.h" @@ -291,7 +292,6 @@ out: read_unlock(&EXT4_I(inode)->i_es_lock); - ext4_es_lru_add(inode); trace_ext4_es_find_delayed_extent_range_exit(inode, es); } @@ -672,7 +672,6 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, error: write_unlock(&EXT4_I(inode)->i_es_lock); - ext4_es_lru_add(inode); ext4_es_print_tree(inode); return err; @@ -734,7 +733,6 @@ out: read_unlock(&EXT4_I(inode)->i_es_lock); - ext4_es_lru_add(inode); trace_ext4_es_lookup_extent_exit(inode, es, found); return found; } @@ -878,12 +876,28 @@ int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex) EXTENT_STATUS_WRITTEN); } +static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a, + struct list_head *b) +{ + struct ext4_inode_info *eia, *eib; + eia = list_entry(a, struct ext4_inode_info, i_es_lru); + eib = list_entry(b, struct ext4_inode_info, i_es_lru); + + if (eia->i_touch_when == eib->i_touch_when) + return 0; + if (time_after(eia->i_touch_when, eib->i_touch_when)) + return 1; + else + return -1; +} + static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) { struct ext4_sb_info *sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker); struct ext4_inode_info *ei; - struct list_head *cur, *tmp, scanned; + struct list_head *cur, *tmp; + LIST_HEAD(skiped); int nr_to_scan = sc->nr_to_scan; int ret, nr_shrunk = 0; @@ -893,23 +907,41 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) if (!nr_to_scan) return ret; - INIT_LIST_HEAD(&scanned); - spin_lock(&sbi->s_es_lru_lock); + + /* + * If the inode that is at the head of LRU list is newer than + * last_sorted time, that means that we need to sort this list. + */ + ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info, i_es_lru); + if (sbi->s_es_last_sorted < ei->i_touch_when) { + list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp); + sbi->s_es_last_sorted = jiffies; + } + list_for_each_safe(cur, tmp, &sbi->s_es_lru) { - list_move_tail(cur, &scanned); + /* + * If we have already reclaimed all extents from extent + * status tree, just stop the loop immediately. + */ + if (percpu_counter_read_positive(&sbi->s_extent_cache_cnt) == 0) + break; ei = list_entry(cur, struct ext4_inode_info, i_es_lru); - read_lock(&ei->i_es_lock); - if (ei->i_es_lru_nr == 0) { - read_unlock(&ei->i_es_lock); + /* Skip the inode that is newer than the last_sorted time */ + if (sbi->s_es_last_sorted < ei->i_touch_when) { + list_move_tail(cur, &skiped); continue; } - read_unlock(&ei->i_es_lock); + + if (ei->i_es_lru_nr == 0) + continue; write_lock(&ei->i_es_lock); ret = __es_try_to_reclaim_extents(ei, nr_to_scan); + if (ei->i_es_lru_nr == 0) + list_del_init(&ei->i_es_lru); write_unlock(&ei->i_es_lock); nr_shrunk += ret; @@ -917,7 +949,9 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) if (nr_to_scan == 0) break; } - list_splice_tail(&scanned, &sbi->s_es_lru); + + /* Move the newer inodes into the tail of the LRU list. 
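[Editor's note] ext4_inode_touch_time_cmp() above orders inodes by i_touch_when so the shrinker reclaims from the least recently used end, and the list is re-sorted lazily only when the head turns out to be newer than s_es_last_sorted. A rough userspace analogue of the comparator, using qsort() on an array instead of the kernel's list_sort(), and a plain comparison instead of the wraparound-safe time_after():

```c
#include <stdio.h>
#include <stdlib.h>

struct toy_inode {
    unsigned long ino;
    unsigned long touch_when;        /* stands in for jiffies */
};

static int touch_time_cmp(const void *a, const void *b)
{
    const struct toy_inode *ia = a, *ib = b;

    if (ia->touch_when == ib->touch_when)
        return 0;
    return ia->touch_when > ib->touch_when ? 1 : -1;
}

int main(void)
{
    struct toy_inode lru[] = { { 12, 300 }, { 34, 100 }, { 56, 200 } };
    size_t n = sizeof(lru) / sizeof(lru[0]);

    qsort(lru, n, sizeof(lru[0]), touch_time_cmp);
    for (size_t i = 0; i < n; i++)   /* oldest first: reclaim from the head */
        printf("ino %lu, touched at %lu\n", lru[i].ino, lru[i].touch_when);
    return 0;
}
```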
*/ + list_splice_tail(&skiped, &sbi->s_es_lru); spin_unlock(&sbi->s_es_lru_lock); ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt); @@ -925,21 +959,19 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) return ret; } -void ext4_es_register_shrinker(struct super_block *sb) +void ext4_es_register_shrinker(struct ext4_sb_info *sbi) { - struct ext4_sb_info *sbi; - - sbi = EXT4_SB(sb); INIT_LIST_HEAD(&sbi->s_es_lru); spin_lock_init(&sbi->s_es_lru_lock); + sbi->s_es_last_sorted = 0; sbi->s_es_shrinker.shrink = ext4_es_shrink; sbi->s_es_shrinker.seeks = DEFAULT_SEEKS; register_shrinker(&sbi->s_es_shrinker); } -void ext4_es_unregister_shrinker(struct super_block *sb) +void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi) { - unregister_shrinker(&EXT4_SB(sb)->s_es_shrinker); + unregister_shrinker(&sbi->s_es_shrinker); } void ext4_es_lru_add(struct inode *inode) @@ -947,11 +979,14 @@ void ext4_es_lru_add(struct inode *inode) struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + ei->i_touch_when = jiffies; + + if (!list_empty(&ei->i_es_lru)) + return; + spin_lock(&sbi->s_es_lru_lock); if (list_empty(&ei->i_es_lru)) list_add_tail(&ei->i_es_lru, &sbi->s_es_lru); - else - list_move_tail(&ei->i_es_lru, &sbi->s_es_lru); spin_unlock(&sbi->s_es_lru_lock); } diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h index f740eb03b707..e936730cc5b0 100644 --- a/fs/ext4/extents_status.h +++ b/fs/ext4/extents_status.h @@ -39,6 +39,7 @@ EXTENT_STATUS_DELAYED | \ EXTENT_STATUS_HOLE) +struct ext4_sb_info; struct ext4_extent; struct extent_status { @@ -119,8 +120,8 @@ static inline void ext4_es_store_status(struct extent_status *es, es->es_pblk = block; } -extern void ext4_es_register_shrinker(struct super_block *sb); -extern void ext4_es_unregister_shrinker(struct super_block *sb); +extern void ext4_es_register_shrinker(struct ext4_sb_info *sbi); +extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi); extern void ext4_es_lru_add(struct inode *inode); extern void ext4_es_lru_del(struct inode *inode); diff --git a/fs/ext4/file.c b/fs/ext4/file.c index b1b4d51b5d86..6f4cc567c382 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -312,7 +312,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, blkbits = inode->i_sb->s_blocksize_bits; startoff = *offset; lastoff = startoff; - endoff = (map->m_lblk + map->m_len) << blkbits; + endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits; index = startoff >> PAGE_CACHE_SHIFT; end = endoff >> PAGE_CACHE_SHIFT; @@ -457,7 +457,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) ret = ext4_map_blocks(NULL, inode, &map, 0); if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { if (last != start) - dataoff = last << blkbits; + dataoff = (loff_t)last << blkbits; break; } @@ -468,7 +468,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) ext4_es_find_delayed_extent_range(inode, last, last, &es); if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { if (last != start) - dataoff = last << blkbits; + dataoff = (loff_t)last << blkbits; break; } @@ -486,7 +486,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) } last++; - dataoff = last << blkbits; + dataoff = (loff_t)last << blkbits; } while (last <= end); mutex_unlock(&inode->i_mutex); @@ -494,17 +494,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) if (dataoff > isize) return -ENXIO; 
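[Editor's note] The recurring (loff_t) casts added in fs/ext4/file.c above are not cosmetic: shifting a 32-bit block number left by the block-size bits wraps before any implicit widening to 64 bits happens. A small demonstration of the difference:

```c
#include <stdio.h>

int main(void)
{
    unsigned int last = 0x00400000;          /* logical block 4M */
    int blkbits = 12;                        /* 4 KiB blocks */

    long long wrong = last << blkbits;       /* 32-bit shift wraps to 0 */
    long long right = (long long)last << blkbits;

    printf("without cast: %lld\n", wrong);   /* 0 */
    printf("with cast:    %lld\n", right);   /* 17179869184 (16 GiB) */
    return 0;
}
```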
- if (dataoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) - return -EINVAL; - if (dataoff > maxsize) - return -EINVAL; - - if (dataoff != file->f_pos) { - file->f_pos = dataoff; - file->f_version = 0; - } - - return dataoff; + return vfs_setpos(file, dataoff, maxsize); } /* @@ -540,7 +530,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) ret = ext4_map_blocks(NULL, inode, &map, 0); if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { last += ret; - holeoff = last << blkbits; + holeoff = (loff_t)last << blkbits; continue; } @@ -551,7 +541,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) ext4_es_find_delayed_extent_range(inode, last, last, &es); if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { last = es.es_lblk + es.es_len; - holeoff = last << blkbits; + holeoff = (loff_t)last << blkbits; continue; } @@ -566,7 +556,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) &map, &holeoff); if (!unwritten) { last += ret; - holeoff = last << blkbits; + holeoff = (loff_t)last << blkbits; continue; } } @@ -580,17 +570,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) if (holeoff > isize) holeoff = isize; - if (holeoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) - return -EINVAL; - if (holeoff > maxsize) - return -EINVAL; - - if (holeoff != file->f_pos) { - file->f_pos = holeoff; - file->f_version = 0; - } - - return holeoff; + return vfs_setpos(file, holeoff, maxsize); } /* diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index e0ba8a408def..a8bc47f75fa0 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c @@ -73,32 +73,6 @@ static int ext4_sync_parent(struct inode *inode) return ret; } -/** - * __sync_file - generic_file_fsync without the locking and filemap_write - * @inode: inode to sync - * @datasync: only sync essential metadata if true - * - * This is just generic_file_fsync without the locking. This is needed for - * nojournal mode to make sure this inodes data/metadata makes it to disk - * properly. The i_mutex should be held already. - */ -static int __sync_inode(struct inode *inode, int datasync) -{ - int err; - int ret; - - ret = sync_mapping_buffers(inode->i_mapping); - if (!(inode->i_state & I_DIRTY)) - return ret; - if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) - return ret; - - err = sync_inode_metadata(inode, 1); - if (ret == 0) - ret = err; - return ret; -} - /* * akpm: A new design for ext4_sync_file(). 
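[Editor's note] Both seek paths above now finish with vfs_setpos(), which centralizes the offset validation and f_pos/f_version update that each caller used to open-code (visible in the removed lines). A simplified model of the helper's contract, ignoring the FMODE_UNSIGNED_OFFSET case the real one also handles:

```c
#include <errno.h>
#include <stdio.h>

struct toy_file {
    long long f_pos;
    unsigned long f_version;
};

/* Validate the new offset once, then update position and version. */
static long long toy_setpos(struct toy_file *file, long long offset,
                            long long maxsize)
{
    if (offset < 0 || offset > maxsize)
        return -EINVAL;
    if (offset != file->f_pos) {
        file->f_pos = offset;
        file->f_version = 0;
    }
    return offset;
}

int main(void)
{
    struct toy_file f = { 0, 7 };

    printf("%lld\n", toy_setpos(&f, 4096, 1 << 20));  /* 4096 */
    printf("%lld\n", toy_setpos(&f, -1, 1 << 20));    /* -EINVAL */
    return 0;
}
```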
* @@ -116,7 +90,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) struct inode *inode = file->f_mapping->host; struct ext4_inode_info *ei = EXT4_I(inode); journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; - int ret, err; + int ret = 0, err; tid_t commit_tid; bool needs_barrier = false; @@ -124,25 +98,24 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) trace_ext4_sync_file_enter(file, datasync); - ret = filemap_write_and_wait_range(inode->i_mapping, start, end); - if (ret) - return ret; - mutex_lock(&inode->i_mutex); - - if (inode->i_sb->s_flags & MS_RDONLY) - goto out; - - ret = ext4_flush_unwritten_io(inode); - if (ret < 0) + if (inode->i_sb->s_flags & MS_RDONLY) { + /* Make sure that we read updated s_mount_flags value */ + smp_rmb(); + if (EXT4_SB(inode->i_sb)->s_mount_flags & EXT4_MF_FS_ABORTED) + ret = -EROFS; goto out; + } if (!journal) { - ret = __sync_inode(inode, datasync); + ret = generic_file_fsync(file, start, end, datasync); if (!ret && !hlist_empty(&inode->i_dentry)) ret = ext4_sync_parent(inode); goto out; } + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) + return ret; /* * data=writeback,ordered: * The caller's filemap_fdatawrite()/wait will sync the data. @@ -172,8 +145,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) if (!ret) ret = err; } - out: - mutex_unlock(&inode->i_mutex); +out: trace_ext4_sync_file_exit(inode, ret); return ret; } diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 00a818d67b54..f03598c6ffd3 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -747,7 +747,8 @@ repeat_in_this_group: if (!handle) { BUG_ON(nblocks <= 0); handle = __ext4_journal_start_sb(dir->i_sb, line_no, - handle_type, nblocks); + handle_type, nblocks, + 0); if (IS_ERR(handle)) { err = PTR_ERR(handle); ext4_std_error(sb, err); diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index b8d5d351e24f..87b30cd357e7 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -624,7 +624,7 @@ cleanup: partial--; } out: - trace_ext4_ind_map_blocks_exit(inode, map, err); + trace_ext4_ind_map_blocks_exit(inode, flags, map, err); return err; } @@ -675,11 +675,6 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, retry: if (rw == READ && ext4_should_dioread_nolock(inode)) { - if (unlikely(atomic_read(&EXT4_I(inode)->i_unwritten))) { - mutex_lock(&inode->i_mutex); - ext4_flush_unwritten_io(inode); - mutex_unlock(&inode->i_mutex); - } /* * Nolock dioread optimization may be dynamically disabled * via ext4_inode_block_unlocked_dio(). 
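[Editor's note] The reworked ext4_sync_file() above picks a strategy up front: fail early on an aborted read-only filesystem, fall back to generic_file_fsync() when running without a journal, and otherwise flush the data range and wait on the journal commit. A toy decision function mirroring that ordering; the enum and return codes are illustrative only:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum sync_action { SYNC_NONE, SYNC_GENERIC, SYNC_JOURNAL };

static int pick_sync_action(bool rdonly, bool aborted, bool has_journal)
{
    if (rdonly)
        return aborted ? -EROFS : SYNC_NONE;  /* nothing to write back */
    if (!has_journal)
        return SYNC_GENERIC;                  /* generic_file_fsync() path */
    return SYNC_JOURNAL;                      /* wait on commit_tid */
}

int main(void)
{
    printf("%d\n", pick_sync_action(true, true, true));    /* -EROFS */
    printf("%d\n", pick_sync_action(false, false, false)); /* 1 */
    printf("%d\n", pick_sync_action(false, false, true));  /* 2 */
    return 0;
}
```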
Check inode's state @@ -779,27 +774,18 @@ int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock) return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1; } -int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk) +/* + * Calculate number of indirect blocks touched by mapping @nrblocks logically + * contiguous blocks + */ +int ext4_ind_trans_blocks(struct inode *inode, int nrblocks) { - int indirects; - - /* if nrblocks are contiguous */ - if (chunk) { - /* - * With N contiguous data blocks, we need at most - * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, - * 2 dindirect blocks, and 1 tindirect block - */ - return DIV_ROUND_UP(nrblocks, - EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4; - } /* - * if nrblocks are not contiguous, worse case, each block touch - * a indirect block, and each indirect block touch a double indirect - * block, plus a triple indirect block + * With N contiguous data blocks, we need at most + * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, + * 2 dindirect blocks, and 1 tindirect block */ - indirects = nrblocks * 2 + 1; - return indirects; + return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4; } /* @@ -940,11 +926,13 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode, __le32 *last) { __le32 *p; - int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED; + int flags = EXT4_FREE_BLOCKS_VALIDATED; int err; if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) - flags |= EXT4_FREE_BLOCKS_METADATA; + flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA; + else if (ext4_should_journal_data(inode)) + flags |= EXT4_FREE_BLOCKS_FORGET; if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, count)) { diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 3e2bf873e8a8..d9ecbf1113a7 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -72,7 +72,7 @@ static int get_max_inline_xattr_value_size(struct inode *inode, entry = (struct ext4_xattr_entry *) ((void *)raw_inode + EXT4_I(inode)->i_inline_off); - free += le32_to_cpu(entry->e_value_size); + free += EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)); goto out; } @@ -1404,16 +1404,15 @@ out: * offset as if '.' and '..' really take place. * */ -int ext4_read_inline_dir(struct file *filp, - void *dirent, filldir_t filldir, +int ext4_read_inline_dir(struct file *file, + struct dir_context *ctx, int *has_inline_data) { - int error = 0; unsigned int offset, parent_ino; - int i, stored; + int i; struct ext4_dir_entry_2 *de; struct super_block *sb; - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); int ret, inline_size = 0; struct ext4_iloc iloc; void *dir_buf = NULL; @@ -1444,9 +1443,8 @@ int ext4_read_inline_dir(struct file *filp, goto out; sb = inode->i_sb; - stored = 0; parent_ino = le32_to_cpu(((struct ext4_dir_entry_2 *)dir_buf)->inode); - offset = filp->f_pos; + offset = ctx->pos; /* * dotdot_offset and dotdot_size is the real offset and @@ -1460,104 +1458,74 @@ int ext4_read_inline_dir(struct file *filp, extra_offset = dotdot_size - EXT4_INLINE_DOTDOT_SIZE; extra_size = extra_offset + inline_size; - while (!error && !stored && filp->f_pos < extra_size) { -revalidate: - /* - * If the version has changed since the last call to - * readdir(2), then we might be pointing to an invalid - * dirent right now. Scan from the start of the inline - * dir to make sure. - */ - if (filp->f_version != inode->i_version) { - for (i = 0; i < extra_size && i < offset;) { - /* - * "." 
is with offset 0 and - * ".." is dotdot_offset. - */ - if (!i) { - i = dotdot_offset; - continue; - } else if (i == dotdot_offset) { - i = dotdot_size; - continue; - } - /* for other entry, the real offset in - * the buf has to be tuned accordingly. - */ - de = (struct ext4_dir_entry_2 *) - (dir_buf + i - extra_offset); - /* It's too expensive to do a full - * dirent test each time round this - * loop, but we do have to test at - * least that it is non-zero. A - * failure will be detected in the - * dirent test below. */ - if (ext4_rec_len_from_disk(de->rec_len, - extra_size) < EXT4_DIR_REC_LEN(1)) - break; - i += ext4_rec_len_from_disk(de->rec_len, - extra_size); - } - offset = i; - filp->f_pos = offset; - filp->f_version = inode->i_version; - } - - while (!error && filp->f_pos < extra_size) { - if (filp->f_pos == 0) { - error = filldir(dirent, ".", 1, 0, inode->i_ino, - DT_DIR); - if (error) - break; - stored++; - filp->f_pos = dotdot_offset; + /* + * If the version has changed since the last call to + * readdir(2), then we might be pointing to an invalid + * dirent right now. Scan from the start of the inline + * dir to make sure. + */ + if (file->f_version != inode->i_version) { + for (i = 0; i < extra_size && i < offset;) { + /* + * "." is with offset 0 and + * ".." is dotdot_offset. + */ + if (!i) { + i = dotdot_offset; + continue; + } else if (i == dotdot_offset) { + i = dotdot_size; continue; } + /* for other entry, the real offset in + * the buf has to be tuned accordingly. + */ + de = (struct ext4_dir_entry_2 *) + (dir_buf + i - extra_offset); + /* It's too expensive to do a full + * dirent test each time round this + * loop, but we do have to test at + * least that it is non-zero. A + * failure will be detected in the + * dirent test below. */ + if (ext4_rec_len_from_disk(de->rec_len, extra_size) + < EXT4_DIR_REC_LEN(1)) + break; + i += ext4_rec_len_from_disk(de->rec_len, + extra_size); + } + offset = i; + ctx->pos = offset; + file->f_version = inode->i_version; + } - if (filp->f_pos == dotdot_offset) { - error = filldir(dirent, "..", 2, - dotdot_offset, - parent_ino, DT_DIR); - if (error) - break; - stored++; + while (ctx->pos < extra_size) { + if (ctx->pos == 0) { + if (!dir_emit(ctx, ".", 1, inode->i_ino, DT_DIR)) + goto out; + ctx->pos = dotdot_offset; + continue; + } - filp->f_pos = dotdot_size; - continue; - } + if (ctx->pos == dotdot_offset) { + if (!dir_emit(ctx, "..", 2, parent_ino, DT_DIR)) + goto out; + ctx->pos = dotdot_size; + continue; + } - de = (struct ext4_dir_entry_2 *) - (dir_buf + filp->f_pos - extra_offset); - if (ext4_check_dir_entry(inode, filp, de, - iloc.bh, dir_buf, - extra_size, filp->f_pos)) { - ret = stored; + de = (struct ext4_dir_entry_2 *) + (dir_buf + ctx->pos - extra_offset); + if (ext4_check_dir_entry(inode, file, de, iloc.bh, dir_buf, + extra_size, ctx->pos)) + goto out; + if (le32_to_cpu(de->inode)) { + if (!dir_emit(ctx, de->name, de->name_len, + le32_to_cpu(de->inode), + get_dtype(sb, de->file_type))) goto out; - } - if (le32_to_cpu(de->inode)) { - /* We might block in the next section - * if the data destination is - * currently swapped out. So, use a - * version stamp to detect whether or - * not the directory has been modified - * during the copy operation. 
- */ - u64 version = filp->f_version; - - error = filldir(dirent, de->name, - de->name_len, - filp->f_pos, - le32_to_cpu(de->inode), - get_dtype(sb, de->file_type)); - if (error) - break; - if (version != filp->f_version) - goto revalidate; - stored++; - } - filp->f_pos += ext4_rec_len_from_disk(de->rec_len, - extra_size); } + ctx->pos += ext4_rec_len_from_disk(de->rec_len, extra_size); } out: kfree(dir_buf); @@ -1842,7 +1810,7 @@ int ext4_inline_data_fiemap(struct inode *inode, if (error) goto out; - physical = iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits; + physical = (__u64)iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits; physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data; physical += offsetof(struct ext4_inode, i_block); length = i_size_read(inode); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index d6382b89ecbd..0188e65e1f58 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -132,12 +132,12 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode, new_size); } -static void ext4_invalidatepage(struct page *page, unsigned long offset); +static void ext4_invalidatepage(struct page *page, unsigned int offset, + unsigned int length); static int __ext4_journalled_writepage(struct page *page, unsigned int len); static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh); -static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle, - struct inode *inode, struct page *page, loff_t from, - loff_t length, int flags); +static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, + int pextents); /* * Test whether an inode is a fast symlink. @@ -215,7 +215,8 @@ void ext4_evict_inode(struct inode *inode) filemap_write_and_wait(&inode->i_data); } truncate_inode_pages(&inode->i_data, 0); - ext4_ioend_shutdown(inode); + + WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count)); goto no_delete; } @@ -225,8 +226,8 @@ void ext4_evict_inode(struct inode *inode) if (ext4_should_order_data(inode)) ext4_begin_ordered_truncate(inode, 0); truncate_inode_pages(&inode->i_data, 0); - ext4_ioend_shutdown(inode); + WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count)); if (is_bad_inode(inode)) goto no_delete; @@ -423,66 +424,6 @@ static int __check_block_validity(struct inode *inode, const char *func, #define check_block_validity(inode, map) \ __check_block_validity((inode), __func__, __LINE__, (map)) -/* - * Return the number of contiguous dirty pages in a given inode - * starting at page frame idx. 
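[Editor's note] The inline-dir conversion above replaces the filldir callback protocol with dir_emit() on a struct dir_context: the actor returns false when the destination buffer is full, and the iterator advances ctx->pos itself. A toy model of that loop in plain userspace; the structures only mirror the VFS shapes:

```c
#include <stdbool.h>
#include <stdio.h>

struct dir_ctx { long long pos; };

/* Stand-in for dir_emit(): false means "stop, resume later from pos". */
static bool emit(struct dir_ctx *ctx, const char *name, unsigned long ino)
{
    printf("pos=%lld name=%-4s ino=%lu\n", ctx->pos, name, ino);
    return true;
}

int main(void)
{
    static const struct {
        const char *name;
        unsigned long ino;
        int reclen;                 /* on-disk record length */
    } de[] = { { ".", 2, 12 }, { "..", 1, 12 }, { "file", 11, 16 } };
    struct dir_ctx ctx = { 0 };

    for (unsigned i = 0; i < sizeof(de) / sizeof(de[0]); i++) {
        if (!emit(&ctx, de[i].name, de[i].ino))
            return 0;               /* next readdir() continues at ctx.pos */
        ctx.pos += de[i].reclen;
    }
    return 0;
}
```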
- */ -static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx, - unsigned int max_pages) -{ - struct address_space *mapping = inode->i_mapping; - pgoff_t index; - struct pagevec pvec; - pgoff_t num = 0; - int i, nr_pages, done = 0; - - if (max_pages == 0) - return 0; - pagevec_init(&pvec, 0); - while (!done) { - index = idx; - nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, - PAGECACHE_TAG_DIRTY, - (pgoff_t)PAGEVEC_SIZE); - if (nr_pages == 0) - break; - for (i = 0; i < nr_pages; i++) { - struct page *page = pvec.pages[i]; - struct buffer_head *bh, *head; - - lock_page(page); - if (unlikely(page->mapping != mapping) || - !PageDirty(page) || - PageWriteback(page) || - page->index != idx) { - done = 1; - unlock_page(page); - break; - } - if (page_has_buffers(page)) { - bh = head = page_buffers(page); - do { - if (!buffer_delay(bh) && - !buffer_unwritten(bh)) - done = 1; - bh = bh->b_this_page; - } while (!done && (bh != head)); - } - unlock_page(page); - if (done) - break; - idx++; - num++; - if (num >= max_pages) { - done = 1; - break; - } - } - pagevec_release(&pvec); - } - return num; -} - #ifdef ES_AGGRESSIVE_TEST static void ext4_map_blocks_es_recheck(handle_t *handle, struct inode *inode, @@ -573,6 +514,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, "logical block %lu\n", inode->i_ino, flags, map->m_len, (unsigned long) map->m_lblk); + ext4_es_lru_add(inode); + /* Lookup extent status tree firstly */ if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) { if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) { @@ -1118,10 +1061,13 @@ static int ext4_write_end(struct file *file, } } - if (ext4_has_inline_data(inode)) - copied = ext4_write_inline_data_end(inode, pos, len, - copied, page); - else + if (ext4_has_inline_data(inode)) { + ret = ext4_write_inline_data_end(inode, pos, len, + copied, page); + if (ret < 0) + goto errout; + copied = ret; + } else copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); @@ -1157,8 +1103,6 @@ static int ext4_write_end(struct file *file, if (i_size_changed) ext4_mark_inode_dirty(handle, inode); - if (copied < 0) - ret = copied; if (pos + len > inode->i_size && ext4_can_truncate(inode)) /* if we have allocated more blocks and copied * less. We will have blocks allocated outside @@ -1415,21 +1359,28 @@ static void ext4_da_release_space(struct inode *inode, int to_free) } static void ext4_da_page_release_reservation(struct page *page, - unsigned long offset) + unsigned int offset, + unsigned int length) { int to_release = 0; struct buffer_head *head, *bh; unsigned int curr_off = 0; struct inode *inode = page->mapping->host; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + unsigned int stop = offset + length; int num_clusters; ext4_fsblk_t lblk; + BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); + head = page_buffers(page); bh = head; do { unsigned int next_off = curr_off + bh->b_size; + if (next_off > stop) + break; + if ((offset <= curr_off) && (buffer_delay(bh))) { to_release++; clear_buffer_delay(bh); @@ -1460,140 +1411,43 @@ static void ext4_da_page_release_reservation(struct page *page, * Delayed allocation stuff */ -/* - * mpage_da_submit_io - walks through extent of pages and try to write - * them with writepage() call back - * - * @mpd->inode: inode - * @mpd->first_page: first page of the extent - * @mpd->next_page: page after the last page of the extent - * - * By the time mpage_da_submit_io() is called we expect all blocks - * to be allocated. this may be wrong if allocation failed. 
- * - * As pages are already locked by write_cache_pages(), we can't use it - */ -static int mpage_da_submit_io(struct mpage_da_data *mpd, - struct ext4_map_blocks *map) -{ - struct pagevec pvec; - unsigned long index, end; - int ret = 0, err, nr_pages, i; - struct inode *inode = mpd->inode; - struct address_space *mapping = inode->i_mapping; - loff_t size = i_size_read(inode); - unsigned int len, block_start; - struct buffer_head *bh, *page_bufs = NULL; - sector_t pblock = 0, cur_logical = 0; - struct ext4_io_submit io_submit; +struct mpage_da_data { + struct inode *inode; + struct writeback_control *wbc; - BUG_ON(mpd->next_page <= mpd->first_page); - memset(&io_submit, 0, sizeof(io_submit)); + pgoff_t first_page; /* The first page to write */ + pgoff_t next_page; /* Current page to examine */ + pgoff_t last_page; /* Last page to examine */ /* - * We need to start from the first_page to the next_page - 1 - * to make sure we also write the mapped dirty buffer_heads. - * If we look at mpd->b_blocknr we would only be looking - * at the currently mapped buffer_heads. + * Extent to map - this can be after first_page because that can be + * fully mapped. We somewhat abuse m_flags to store whether the extent + * is delalloc or unwritten. */ - index = mpd->first_page; - end = mpd->next_page - 1; - - pagevec_init(&pvec, 0); - while (index <= end) { - nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); - if (nr_pages == 0) - break; - for (i = 0; i < nr_pages; i++) { - int skip_page = 0; - struct page *page = pvec.pages[i]; - - index = page->index; - if (index > end) - break; - - if (index == size >> PAGE_CACHE_SHIFT) - len = size & ~PAGE_CACHE_MASK; - else - len = PAGE_CACHE_SIZE; - if (map) { - cur_logical = index << (PAGE_CACHE_SHIFT - - inode->i_blkbits); - pblock = map->m_pblk + (cur_logical - - map->m_lblk); - } - index++; - - BUG_ON(!PageLocked(page)); - BUG_ON(PageWriteback(page)); - - bh = page_bufs = page_buffers(page); - block_start = 0; - do { - if (map && (cur_logical >= map->m_lblk) && - (cur_logical <= (map->m_lblk + - (map->m_len - 1)))) { - if (buffer_delay(bh)) { - clear_buffer_delay(bh); - bh->b_blocknr = pblock; - } - if (buffer_unwritten(bh) || - buffer_mapped(bh)) - BUG_ON(bh->b_blocknr != pblock); - if (map->m_flags & EXT4_MAP_UNINIT) - set_buffer_uninit(bh); - clear_buffer_unwritten(bh); - } - - /* - * skip page if block allocation undone and - * block is dirty - */ - if (ext4_bh_delay_or_unwritten(NULL, bh)) - skip_page = 1; - bh = bh->b_this_page; - block_start += bh->b_size; - cur_logical++; - pblock++; - } while (bh != page_bufs); - - if (skip_page) { - unlock_page(page); - continue; - } - - clear_page_dirty_for_io(page); - err = ext4_bio_write_page(&io_submit, page, len, - mpd->wbc); - if (!err) - mpd->pages_written++; - /* - * In error case, we have to continue because - * remaining pages are still locked - */ - if (ret == 0) - ret = err; - } - pagevec_release(&pvec); - } - ext4_io_submit(&io_submit); - return ret; -} + struct ext4_map_blocks map; + struct ext4_io_submit io_submit; /* IO submission data */ +}; -static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd) +static void mpage_release_unused_pages(struct mpage_da_data *mpd, + bool invalidate) { int nr_pages, i; pgoff_t index, end; struct pagevec pvec; struct inode *inode = mpd->inode; struct address_space *mapping = inode->i_mapping; - ext4_lblk_t start, last; + + /* This is necessary when next_page == 0. 
*/ + if (mpd->first_page >= mpd->next_page) + return; index = mpd->first_page; end = mpd->next_page - 1; - - start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); - last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits); - ext4_es_remove_extent(inode, start, last - start + 1); + if (invalidate) { + ext4_lblk_t start, last; + start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits); + ext4_es_remove_extent(inode, start, last - start + 1); + } pagevec_init(&pvec, 0); while (index <= end) { @@ -1606,14 +1460,15 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd) break; BUG_ON(!PageLocked(page)); BUG_ON(PageWriteback(page)); - block_invalidatepage(page, 0); - ClearPageUptodate(page); + if (invalidate) { + block_invalidatepage(page, 0, PAGE_CACHE_SIZE); + ClearPageUptodate(page); + } unlock_page(page); } index = pvec.pages[nr_pages - 1]->index + 1; pagevec_release(&pvec); } - return; } static void ext4_print_free_blocks(struct inode *inode) @@ -1642,215 +1497,6 @@ static void ext4_print_free_blocks(struct inode *inode) return; } -/* - * mpage_da_map_and_submit - go through given space, map them - * if necessary, and then submit them for I/O - * - * @mpd - bh describing space - * - * The function skips space we know is already mapped to disk blocks. - * - */ -static void mpage_da_map_and_submit(struct mpage_da_data *mpd) -{ - int err, blks, get_blocks_flags; - struct ext4_map_blocks map, *mapp = NULL; - sector_t next = mpd->b_blocknr; - unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; - loff_t disksize = EXT4_I(mpd->inode)->i_disksize; - handle_t *handle = NULL; - - /* - * If the blocks are mapped already, or we couldn't accumulate - * any blocks, then proceed immediately to the submission stage. - */ - if ((mpd->b_size == 0) || - ((mpd->b_state & (1 << BH_Mapped)) && - !(mpd->b_state & (1 << BH_Delay)) && - !(mpd->b_state & (1 << BH_Unwritten)))) - goto submit_io; - - handle = ext4_journal_current_handle(); - BUG_ON(!handle); - - /* - * Call ext4_map_blocks() to allocate any delayed allocation - * blocks, or to convert an uninitialized extent to be - * initialized (in the case where we have written into - * one or more preallocated blocks). - * - * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to - * indicate that we are on the delayed allocation path. This - * affects functions in many different parts of the allocation - * call path. This flag exists primarily because we don't - * want to change *many* call functions, so ext4_map_blocks() - * will set the EXT4_STATE_DELALLOC_RESERVED flag once the - * inode's allocation semaphore is taken. - * - * If the blocks in questions were delalloc blocks, set - * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting - * variables are updated after the blocks have been allocated. - */ - map.m_lblk = next; - map.m_len = max_blocks; - /* - * We're in delalloc path and it is possible that we're going to - * need more metadata blocks than previously reserved. However - * we must not fail because we're in writeback and there is - * nothing we can do about it so it might result in data loss. - * So use reserved blocks to allocate metadata if possible. 
- */ - get_blocks_flags = EXT4_GET_BLOCKS_CREATE | - EXT4_GET_BLOCKS_METADATA_NOFAIL; - if (ext4_should_dioread_nolock(mpd->inode)) - get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; - if (mpd->b_state & (1 << BH_Delay)) - get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; - - - blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags); - if (blks < 0) { - struct super_block *sb = mpd->inode->i_sb; - - err = blks; - /* - * If get block returns EAGAIN or ENOSPC and there - * appears to be free blocks we will just let - * mpage_da_submit_io() unlock all of the pages. - */ - if (err == -EAGAIN) - goto submit_io; - - if (err == -ENOSPC && ext4_count_free_clusters(sb)) { - mpd->retval = err; - goto submit_io; - } - - /* - * get block failure will cause us to loop in - * writepages, because a_ops->writepage won't be able - * to make progress. The page will be redirtied by - * writepage and writepages will again try to write - * the same. - */ - if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) { - ext4_msg(sb, KERN_CRIT, - "delayed block allocation failed for inode %lu " - "at logical offset %llu with max blocks %zd " - "with error %d", mpd->inode->i_ino, - (unsigned long long) next, - mpd->b_size >> mpd->inode->i_blkbits, err); - ext4_msg(sb, KERN_CRIT, - "This should not happen!! Data will be lost"); - if (err == -ENOSPC) - ext4_print_free_blocks(mpd->inode); - } - /* invalidate all the pages */ - ext4_da_block_invalidatepages(mpd); - - /* Mark this page range as having been completed */ - mpd->io_done = 1; - return; - } - BUG_ON(blks == 0); - - mapp = ↦ - if (map.m_flags & EXT4_MAP_NEW) { - struct block_device *bdev = mpd->inode->i_sb->s_bdev; - int i; - - for (i = 0; i < map.m_len; i++) - unmap_underlying_metadata(bdev, map.m_pblk + i); - } - - /* - * Update on-disk size along with block allocation. - */ - disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; - if (disksize > i_size_read(mpd->inode)) - disksize = i_size_read(mpd->inode); - if (disksize > EXT4_I(mpd->inode)->i_disksize) { - ext4_update_i_disksize(mpd->inode, disksize); - err = ext4_mark_inode_dirty(handle, mpd->inode); - if (err) - ext4_error(mpd->inode->i_sb, - "Failed to mark inode %lu dirty", - mpd->inode->i_ino); - } - -submit_io: - mpage_da_submit_io(mpd, mapp); - mpd->io_done = 1; -} - -#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ - (1 << BH_Delay) | (1 << BH_Unwritten)) - -/* - * mpage_add_bh_to_extent - try to add one more block to extent of blocks - * - * @mpd->lbh - extent of blocks - * @logical - logical number of the block in the file - * @b_state - b_state of the buffer head added - * - * the function is used to collect contig. blocks in same state - */ -static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, sector_t logical, - unsigned long b_state) -{ - sector_t next; - int blkbits = mpd->inode->i_blkbits; - int nrblocks = mpd->b_size >> blkbits; - - /* - * XXX Don't go larger than mballoc is willing to allocate - * This is a stopgap solution. We eventually need to fold - * mpage_da_submit_io() into this function and then call - * ext4_map_blocks() multiple times in a loop - */ - if (nrblocks >= (8*1024*1024 >> blkbits)) - goto flush_it; - - /* check if the reserved journal credits might overflow */ - if (!ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS)) { - if (nrblocks >= EXT4_MAX_TRANS_DATA) { - /* - * With non-extent format we are limited by the journal - * credit available. 
Total credit needed to insert - * nrblocks contiguous blocks is dependent on the - * nrblocks. So limit nrblocks. - */ - goto flush_it; - } - } - /* - * First block in the extent - */ - if (mpd->b_size == 0) { - mpd->b_blocknr = logical; - mpd->b_size = 1 << blkbits; - mpd->b_state = b_state & BH_FLAGS; - return; - } - - next = mpd->b_blocknr + nrblocks; - /* - * Can we merge the block to our big extent? - */ - if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) { - mpd->b_size += 1 << blkbits; - return; - } - -flush_it: - /* - * We couldn't merge the block to our extent, so we - * need to flush current extent and start new one - */ - mpage_da_map_and_submit(mpd); - return; -} - static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) { return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh); @@ -1883,6 +1529,8 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, "logical block %lu\n", inode->i_ino, map->m_len, (unsigned long) map->m_lblk); + ext4_es_lru_add(inode); + /* Lookup extent status tree firstly */ if (ext4_es_lookup_extent(inode, iblock, &es)) { @@ -2156,7 +1804,7 @@ out: * lock so we have to do some magic. * * This function can get called via... - * - ext4_da_writepages after taking page lock (have journal handle) + * - ext4_writepages after taking page lock (have journal handle) * - journal_submit_inode_data_buffers (no journal handle) * - shrink_page_list via the kswapd/direct reclaim (no journal handle) * - grab_page_cache when doing write_begin (have journal handle) @@ -2234,76 +1882,405 @@ static int ext4_writepage(struct page *page, */ return __ext4_journalled_writepage(page, len); - memset(&io_submit, 0, sizeof(io_submit)); + ext4_io_submit_init(&io_submit, wbc); + io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); + if (!io_submit.io_end) { + redirty_page_for_writepage(wbc, page); + unlock_page(page); + return -ENOMEM; + } ret = ext4_bio_write_page(&io_submit, page, len, wbc); ext4_io_submit(&io_submit); + /* Drop io_end reference we got from init */ + ext4_put_io_end_defer(io_submit.io_end); return ret; } +#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay)) + /* - * This is called via ext4_da_writepages() to - * calculate the total number of credits to reserve to fit - * a single extent allocation into a single transaction, - * ext4_da_writpeages() will loop calling this before - * the block allocation. + * mballoc gives us at most this number of blocks... + * XXX: That seems to be only a limitation of ext4_mb_normalize_request(). + * The rest of mballoc seems to handle chunks upto full group size. */ +#define MAX_WRITEPAGES_EXTENT_LEN 2048 -static int ext4_da_writepages_trans_blocks(struct inode *inode) +/* + * mpage_add_bh_to_extent - try to add bh to extent of blocks to map + * + * @mpd - extent of blocks + * @lblk - logical number of the block in the file + * @b_state - b_state of the buffer head added + * + * the function is used to collect contig. blocks in same state + */ +static int mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk, + unsigned long b_state) +{ + struct ext4_map_blocks *map = &mpd->map; + + /* Don't go larger than mballoc is willing to allocate */ + if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN) + return 0; + + /* First block in the extent? */ + if (map->m_len == 0) { + map->m_lblk = lblk; + map->m_len = 1; + map->m_flags = b_state & BH_FLAGS; + return 1; + } + + /* Can we merge the block to our big extent? 
*/ + if (lblk == map->m_lblk + map->m_len && + (b_state & BH_FLAGS) == map->m_flags) { + map->m_len++; + return 1; + } + return 0; +} + +static bool add_page_bufs_to_extent(struct mpage_da_data *mpd, + struct buffer_head *head, + struct buffer_head *bh, + ext4_lblk_t lblk) +{ + struct inode *inode = mpd->inode; + ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1) + >> inode->i_blkbits; + + do { + BUG_ON(buffer_locked(bh)); + + if (!buffer_dirty(bh) || !buffer_mapped(bh) || + (!buffer_delay(bh) && !buffer_unwritten(bh)) || + lblk >= blocks) { + /* Found extent to map? */ + if (mpd->map.m_len) + return false; + if (lblk >= blocks) + return true; + continue; + } + if (!mpage_add_bh_to_extent(mpd, lblk, bh->b_state)) + return false; + } while (lblk++, (bh = bh->b_this_page) != head); + return true; +} + +static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) { - int max_blocks = EXT4_I(inode)->i_reserved_data_blocks; + int len; + loff_t size = i_size_read(mpd->inode); + int err; + + BUG_ON(page->index != mpd->first_page); + if (page->index == size >> PAGE_CACHE_SHIFT) + len = size & ~PAGE_CACHE_MASK; + else + len = PAGE_CACHE_SIZE; + clear_page_dirty_for_io(page); + err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc); + if (!err) + mpd->wbc->nr_to_write--; + mpd->first_page++; + return err; +} + +/* + * mpage_map_buffers - update buffers corresponding to changed extent and + * submit fully mapped pages for IO + * + * @mpd - description of extent to map, on return next extent to map + * + * Scan buffers corresponding to changed extent (we expect corresponding pages + * to be already locked) and update buffer state according to new extent state. + * We map delalloc buffers to their physical location, clear unwritten bits, + * and mark buffers as uninit when we perform writes to uninitialized extents + * and do extent conversion after IO is finished. If the last page is not fully + * mapped, we update @map to the next extent in the last page that needs + * mapping. Otherwise we submit the page for IO. + */ +static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) +{ + struct pagevec pvec; + int nr_pages, i; + struct inode *inode = mpd->inode; + struct buffer_head *head, *bh; + int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits; + ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1) + >> inode->i_blkbits; + pgoff_t start, end; + ext4_lblk_t lblk; + sector_t pblock; + int err; + + start = mpd->map.m_lblk >> bpp_bits; + end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits; + lblk = start << bpp_bits; + pblock = mpd->map.m_pblk; + + pagevec_init(&pvec, 0); + while (start <= end) { + nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start, + PAGEVEC_SIZE); + if (nr_pages == 0) + break; + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + + if (page->index > end) + break; + /* Upto 'end' pages must be contiguous */ + BUG_ON(page->index != start); + bh = head = page_buffers(page); + do { + if (lblk < mpd->map.m_lblk) + continue; + if (lblk >= mpd->map.m_lblk + mpd->map.m_len) { + /* + * Buffer after end of mapped extent. + * Find next buffer in the page to map. 
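[Editor's note] mpage_add_bh_to_extent() above grows the extent to map one block at a time, merging only blocks that are logically contiguous and in the same delayed/unwritten state, capped at MAX_WRITEPAGES_EXTENT_LEN. The rule in isolation, as a userspace sketch:

```c
#include <stdbool.h>
#include <stdio.h>

#define MAX_EXTENT_LEN 2048   /* mirrors MAX_WRITEPAGES_EXTENT_LEN */

struct extent {
    unsigned long lblk;       /* first logical block */
    unsigned int len;         /* number of blocks, 0 = empty */
    unsigned int state;       /* delayed/unwritten bits */
};

static bool add_block(struct extent *map, unsigned long lblk,
                      unsigned int state)
{
    if (map->len >= MAX_EXTENT_LEN)
        return false;                   /* mballoc won't go larger */
    if (map->len == 0) {                /* first block starts the extent */
        map->lblk = lblk;
        map->len = 1;
        map->state = state;
        return true;
    }
    if (lblk == map->lblk + map->len && state == map->state) {
        map->len++;                     /* contiguous, same state: merge */
        return true;
    }
    return false;                       /* caller must map current extent */
}

int main(void)
{
    struct extent map = { 0, 0, 0 };

    printf("%d\n", add_block(&map, 10, 1)); /* 1: starts extent */
    printf("%d\n", add_block(&map, 11, 1)); /* 1: merged */
    printf("%d\n", add_block(&map, 13, 1)); /* 0: not contiguous */
    return 0;
}
```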
+ */ + mpd->map.m_len = 0; + mpd->map.m_flags = 0; + add_page_bufs_to_extent(mpd, head, bh, + lblk); + pagevec_release(&pvec); + return 0; + } + if (buffer_delay(bh)) { + clear_buffer_delay(bh); + bh->b_blocknr = pblock++; + } + clear_buffer_unwritten(bh); + } while (++lblk < blocks && + (bh = bh->b_this_page) != head); + + /* + * FIXME: This is going to break if dioread_nolock + * supports blocksize < pagesize as we will try to + * convert potentially unmapped parts of inode. + */ + mpd->io_submit.io_end->size += PAGE_CACHE_SIZE; + /* Page fully mapped - let IO run! */ + err = mpage_submit_page(mpd, page); + if (err < 0) { + pagevec_release(&pvec); + return err; + } + start++; + } + pagevec_release(&pvec); + } + /* Extent fully mapped and matches with page boundary. We are done. */ + mpd->map.m_len = 0; + mpd->map.m_flags = 0; + return 0; +} + +static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd) +{ + struct inode *inode = mpd->inode; + struct ext4_map_blocks *map = &mpd->map; + int get_blocks_flags; + int err; + + trace_ext4_da_write_pages_extent(inode, map); /* - * With non-extent format the journal credit needed to - * insert nrblocks contiguous block is dependent on - * number of contiguous block. So we will limit - * number of contiguous block to a sane value + * Call ext4_map_blocks() to allocate any delayed allocation blocks, or + * to convert an uninitialized extent to be initialized (in the case + * where we have written into one or more preallocated blocks). It is + * possible that we're going to need more metadata blocks than + * previously reserved. However we must not fail because we're in + * writeback and there is nothing we can do about it so it might result + * in data loss. So use reserved blocks to allocate metadata if + * possible. + * + * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if the blocks + * in question are delalloc blocks. This affects functions in many + * different parts of the allocation call path. This flag exists + * primarily because we don't want to change *many* call functions, so + * ext4_map_blocks() will set the EXT4_STATE_DELALLOC_RESERVED flag + * once the inode's allocation semaphore is taken. */ - if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) && - (max_blocks > EXT4_MAX_TRANS_DATA)) - max_blocks = EXT4_MAX_TRANS_DATA; + get_blocks_flags = EXT4_GET_BLOCKS_CREATE | + EXT4_GET_BLOCKS_METADATA_NOFAIL; + if (ext4_should_dioread_nolock(inode)) + get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; + if (map->m_flags & (1 << BH_Delay)) + get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; - return ext4_chunk_trans_blocks(inode, max_blocks); + err = ext4_map_blocks(handle, inode, map, get_blocks_flags); + if (err < 0) + return err; + if (map->m_flags & EXT4_MAP_UNINIT) { + if (!mpd->io_submit.io_end->handle && + ext4_handle_valid(handle)) { + mpd->io_submit.io_end->handle = handle->h_rsv_handle; + handle->h_rsv_handle = NULL; + } + ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end); + } + + BUG_ON(map->m_len == 0); + if (map->m_flags & EXT4_MAP_NEW) { + struct block_device *bdev = inode->i_sb->s_bdev; + int i; + + for (i = 0; i < map->m_len; i++) + unmap_underlying_metadata(bdev, map->m_pblk + i); + } + return 0; } /* - * write_cache_pages_da - walk the list of dirty pages of the given - * address space and accumulate pages that need writing, and call - * mpage_da_map_and_submit to map a single contiguous memory region - * and then write them. 
+ * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length + * mpd->len and submit pages underlying it for IO + * + * @handle - handle for journal operations + * @mpd - extent to map + * + * The function maps extent starting at mpd->lblk of length mpd->len. If it is + * delayed, blocks are allocated, if it is unwritten, we may need to convert + * them to initialized or split the described range from larger unwritten + * extent. Note that we need not map all the described range since allocation + * can return less blocks or the range is covered by more unwritten extents. We + * cannot map more because we are limited by reserved transaction credits. On + * the other hand we always make sure that the last touched page is fully + * mapped so that it can be written out (and thus forward progress is + * guaranteed). After mapping we submit all mapped pages for IO. */ -static int write_cache_pages_da(handle_t *handle, - struct address_space *mapping, - struct writeback_control *wbc, - struct mpage_da_data *mpd, - pgoff_t *done_index) +static int mpage_map_and_submit_extent(handle_t *handle, + struct mpage_da_data *mpd, + bool *give_up_on_write) { - struct buffer_head *bh, *head; - struct inode *inode = mapping->host; - struct pagevec pvec; - unsigned int nr_pages; - sector_t logical; - pgoff_t index, end; - long nr_to_write = wbc->nr_to_write; - int i, tag, ret = 0; - - memset(mpd, 0, sizeof(struct mpage_da_data)); - mpd->wbc = wbc; - mpd->inode = inode; - pagevec_init(&pvec, 0); - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + struct inode *inode = mpd->inode; + struct ext4_map_blocks *map = &mpd->map; + int err; + loff_t disksize; - if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) + mpd->io_submit.io_end->offset = + ((loff_t)map->m_lblk) << inode->i_blkbits; + while (map->m_len) { + err = mpage_map_one_extent(handle, mpd); + if (err < 0) { + struct super_block *sb = inode->i_sb; + + if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) + goto invalidate_dirty_pages; + /* + * Let the uper layers retry transient errors. + * In the case of ENOSPC, if ext4_count_free_blocks() + * is non-zero, a commit should free up blocks. + */ + if ((err == -ENOMEM) || + (err == -ENOSPC && ext4_count_free_clusters(sb))) + return err; + ext4_msg(sb, KERN_CRIT, + "Delayed block allocation failed for " + "inode %lu at logical offset %llu with" + " max blocks %u with error %d", + inode->i_ino, + (unsigned long long)map->m_lblk, + (unsigned)map->m_len, -err); + ext4_msg(sb, KERN_CRIT, + "This should not happen!! Data will " + "be lost\n"); + if (err == -ENOSPC) + ext4_print_free_blocks(inode); + invalidate_dirty_pages: + *give_up_on_write = true; + return err; + } + /* + * Update buffer state, submit mapped pages, and get us new + * extent to map + */ + err = mpage_map_and_submit_buffers(mpd); + if (err < 0) + return err; + } + + /* Update on-disk size after IO is submitted */ + disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT; + if (disksize > i_size_read(inode)) + disksize = i_size_read(inode); + if (disksize > EXT4_I(inode)->i_disksize) { + int err2; + + ext4_update_i_disksize(inode, disksize); + err2 = ext4_mark_inode_dirty(handle, inode); + if (err2) + ext4_error(inode->i_sb, + "Failed to mark inode %lu dirty", + inode->i_ino); + if (!err) + err = err2; + } + return err; +} + +/* + * Calculate the total number of credits to reserve for one writepages + * iteration. This is called from ext4_writepages(). 
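[Editor's note] mpage_map_and_submit_extent() above separates transient mapping failures (ENOMEM, or ENOSPC while free clusters remain, so a forced commit may help) from fatal ones, where it logs and sets give_up_on_write so the dirty pages are invalidated. A rough model of that classification; the boolean inputs are stand-ins for the kernel's checks:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int classify_map_error(int err, bool fs_aborted,
                              bool free_clusters_left, bool *give_up)
{
    if (fs_aborted) {
        *give_up = true;            /* invalidate pages, no complaints */
        return err;
    }
    if (err == -ENOMEM || (err == -ENOSPC && free_clusters_left))
        return err;                 /* transient: caller retries */
    fprintf(stderr, "delayed block allocation failed: data will be lost\n");
    *give_up = true;                /* fatal: invalidate the dirty pages */
    return err;
}

int main(void)
{
    bool give_up = false;

    classify_map_error(-ENOSPC, false, true, &give_up);
    printf("transient: give_up=%d\n", give_up);   /* 0 */
    classify_map_error(-ENOSPC, false, false, &give_up);
    printf("fatal:     give_up=%d\n", give_up);   /* 1 */
    return 0;
}
```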
We map an extent of + * upto MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping + * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN + + * bpp - 1 blocks in bpp different extents. + */ +static int ext4_da_writepages_trans_blocks(struct inode *inode) +{ + int bpp = ext4_journal_blocks_per_page(inode); + + return ext4_meta_trans_blocks(inode, + MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp); +} + +/* + * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages + * and underlying extent to map + * + * @mpd - where to look for pages + * + * Walk dirty pages in the mapping. If they are fully mapped, submit them for + * IO immediately. When we find a page which isn't mapped we start accumulating + * extent of buffers underlying these pages that needs mapping (formed by + * either delayed or unwritten buffers). We also lock the pages containing + * these buffers. The extent found is returned in @mpd structure (starting at + * mpd->lblk with length mpd->len blocks). + * + * Note that this function can attach bios to one io_end structure which are + * neither logically nor physically contiguous. Although it may seem as an + * unnecessary complication, it is actually inevitable in blocksize < pagesize + * case as we need to track IO to all buffers underlying a page in one io_end. + */ +static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) +{ + struct address_space *mapping = mpd->inode->i_mapping; + struct pagevec pvec; + unsigned int nr_pages; + pgoff_t index = mpd->first_page; + pgoff_t end = mpd->last_page; + int tag; + int i, err = 0; + int blkbits = mpd->inode->i_blkbits; + ext4_lblk_t lblk; + struct buffer_head *head; + + if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages) tag = PAGECACHE_TAG_TOWRITE; else tag = PAGECACHE_TAG_DIRTY; - *done_index = index; + pagevec_init(&pvec, 0); + mpd->map.m_len = 0; + mpd->next_page = index; while (index <= end) { nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); if (nr_pages == 0) - return 0; + goto out; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; @@ -2318,31 +2295,21 @@ static int write_cache_pages_da(handle_t *handle, if (page->index > end) goto out; - *done_index = page->index + 1; - - /* - * If we can't merge this page, and we have - * accumulated an contiguous region, write it - */ - if ((mpd->next_page != page->index) && - (mpd->next_page != mpd->first_page)) { - mpage_da_map_and_submit(mpd); - goto ret_extent_tail; - } + /* If we can't merge this page, we are done. 
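[Editor's note] ext4_da_writepages_trans_blocks() above sizes one writepages iteration for up to MAX_WRITEPAGES_EXTENT_LEN + bpp - 1 blocks spread over at most bpp extents, where bpp is blocks per page. The arithmetic for a few block sizes, assuming 4 KiB pages:

```c
#include <stdio.h>

#define MAX_WRITEPAGES_EXTENT_LEN 2048

int main(void)
{
    const int page_shift = 12;                     /* 4 KiB pages */

    for (int blkbits = 10; blkbits <= 12; blkbits++) {
        int bpp = 1 << (page_shift - blkbits);     /* blocks per page */

        printf("blocksize %4d: bpp=%d, blocks per iteration <= %d\n",
               1 << blkbits, bpp,
               MAX_WRITEPAGES_EXTENT_LEN + bpp - 1);
    }
    return 0;   /* 1024 -> 2051, 2048 -> 2049, 4096 -> 2048 */
}
```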
*/ + if (mpd->map.m_len > 0 && mpd->next_page != page->index) + goto out; lock_page(page); - /* - * If the page is no longer dirty, or its - * mapping no longer corresponds to inode we - * are writing (which means it has been - * truncated or invalidated), or the page is - * already under writeback and we are not - * doing a data integrity writeback, skip the page + * If the page is no longer dirty, or its mapping no + * longer corresponds to inode we are writing (which + * means it has been truncated or invalidated), or the + * page is already under writeback and we are not doing + * a data integrity writeback, skip the page */ if (!PageDirty(page) || (PageWriteback(page) && - (wbc->sync_mode == WB_SYNC_NONE)) || + (mpd->wbc->sync_mode == WB_SYNC_NONE)) || unlikely(page->mapping != mapping)) { unlock_page(page); continue; @@ -2351,106 +2318,70 @@ static int write_cache_pages_da(handle_t *handle, wait_on_page_writeback(page); BUG_ON(PageWriteback(page)); - /* - * If we have inline data and arrive here, it means that - * we will soon create the block for the 1st page, so - * we'd better clear the inline data here. - */ - if (ext4_has_inline_data(inode)) { - BUG_ON(ext4_test_inode_state(inode, - EXT4_STATE_MAY_INLINE_DATA)); - ext4_destroy_inline_data(handle, inode); - } - - if (mpd->next_page != page->index) + if (mpd->map.m_len == 0) mpd->first_page = page->index; mpd->next_page = page->index + 1; - logical = (sector_t) page->index << - (PAGE_CACHE_SHIFT - inode->i_blkbits); - /* Add all dirty buffers to mpd */ + lblk = ((ext4_lblk_t)page->index) << + (PAGE_CACHE_SHIFT - blkbits); head = page_buffers(page); - bh = head; - do { - BUG_ON(buffer_locked(bh)); - /* - * We need to try to allocate unmapped blocks - * in the same page. Otherwise we won't make - * progress with the page in ext4_writepage - */ - if (ext4_bh_delay_or_unwritten(NULL, bh)) { - mpage_add_bh_to_extent(mpd, logical, - bh->b_state); - if (mpd->io_done) - goto ret_extent_tail; - } else if (buffer_dirty(bh) && - buffer_mapped(bh)) { - /* - * mapped dirty buffer. We need to - * update the b_state because we look - * at b_state in mpage_da_map_blocks. - * We don't update b_size because if we - * find an unmapped buffer_head later - * we need to use the b_state flag of - * that buffer_head. - */ - if (mpd->b_size == 0) - mpd->b_state = - bh->b_state & BH_FLAGS; - } - logical++; - } while ((bh = bh->b_this_page) != head); - - if (nr_to_write > 0) { - nr_to_write--; - if (nr_to_write == 0 && - wbc->sync_mode == WB_SYNC_NONE) - /* - * We stop writing back only if we are - * not doing integrity sync. In case of - * integrity sync we have to keep going - * because someone may be concurrently - * dirtying pages, and we might have - * synced a lot of newly appeared dirty - * pages, but have not synced all of the - * old dirty pages. - */ + if (!add_page_bufs_to_extent(mpd, head, head, lblk)) + goto out; + /* So far everything mapped? Submit the page for IO. */ + if (mpd->map.m_len == 0) { + err = mpage_submit_page(mpd, page); + if (err < 0) goto out; } + + /* + * Accumulated enough dirty pages? This doesn't apply + * to WB_SYNC_ALL mode. For integrity sync we have to + * keep going because someone may be concurrently + * dirtying pages, and we might have synced a lot of + * newly appeared dirty pages, but have not synced all + * of the old dirty pages. 
+ */ + if (mpd->wbc->sync_mode == WB_SYNC_NONE && + mpd->next_page - mpd->first_page >= + mpd->wbc->nr_to_write) + goto out; } pagevec_release(&pvec); cond_resched(); } return 0; -ret_extent_tail: - ret = MPAGE_DA_EXTENT_TAIL; out: pagevec_release(&pvec); - cond_resched(); - return ret; + return err; } +static int __writepage(struct page *page, struct writeback_control *wbc, + void *data) +{ + struct address_space *mapping = data; + int ret = ext4_writepage(page, wbc); + mapping_set_error(mapping, ret); + return ret; +} -static int ext4_da_writepages(struct address_space *mapping, - struct writeback_control *wbc) +static int ext4_writepages(struct address_space *mapping, + struct writeback_control *wbc) { - pgoff_t index; + pgoff_t writeback_index = 0; + long nr_to_write = wbc->nr_to_write; int range_whole = 0; + int cycled = 1; handle_t *handle = NULL; struct mpage_da_data mpd; struct inode *inode = mapping->host; - int pages_written = 0; - unsigned int max_pages; - int range_cyclic, cycled = 1, io_done = 0; - int needed_blocks, ret = 0; - long desired_nr_to_write, nr_to_writebump = 0; - loff_t range_start = wbc->range_start; + int needed_blocks, rsv_blocks = 0, ret = 0; struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); - pgoff_t done_index = 0; - pgoff_t end; + bool done; struct blk_plug plug; + bool give_up_on_write = false; - trace_ext4_da_writepages(inode, wbc); + trace_ext4_writepages(inode, wbc); /* * No pages to write? This is mainly a kludge to avoid starting @@ -2460,164 +2391,165 @@ static int ext4_da_writepages(struct address_space *mapping, if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) return 0; + if (ext4_should_journal_data(inode)) { + struct blk_plug plug; + int ret; + + blk_start_plug(&plug); + ret = write_cache_pages(mapping, wbc, __writepage, mapping); + blk_finish_plug(&plug); + return ret; + } + /* * If the filesystem has aborted, it is read-only, so return * right away instead of dumping stack traces later on that * will obscure the real source of the problem. We test * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because * the latter could be true if the filesystem is mounted - * read-only, and in that case, ext4_da_writepages should + * read-only, and in that case, ext4_writepages should * *never* be called, so if that ever happens, we would want * the stack trace. */ if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) return -EROFS; + if (ext4_should_dioread_nolock(inode)) { + /* + * We may need to convert upto one extent per block in + * the page and we may dirty the inode. + */ + rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits); + } + + /* + * If we have inline data and arrive here, it means that + * we will soon create the block for the 1st page, so + * we'd better clear the inline data here. + */ + if (ext4_has_inline_data(inode)) { + /* Just inode will be modified... 
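[Editor's note] For dioread_nolock, ext4_writepages() above reserves rsv_blocks = 1 + (PAGE_CACHE_SIZE >> i_blkbits) credits: one possible unwritten-extent conversion per block in the page, plus one for dirtying the inode. The same computation for common block sizes, again assuming 4 KiB pages:

```c
#include <stdio.h>

int main(void)
{
    const int page_shift = 12;                  /* PAGE_CACHE_SIZE = 4 KiB */

    for (int blkbits = 10; blkbits <= 12; blkbits++)
        printf("blocksize %4d: rsv_blocks = %d\n",
               1 << blkbits, 1 + (1 << (page_shift - blkbits)));
    /* 1024 -> 5, 2048 -> 3, 4096 -> 2 */
    return 0;
}
```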
*/ + handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + goto out_writepages; + } + BUG_ON(ext4_test_inode_state(inode, + EXT4_STATE_MAY_INLINE_DATA)); + ext4_destroy_inline_data(handle, inode); + ext4_journal_stop(handle); + } + if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; - range_cyclic = wbc->range_cyclic; if (wbc->range_cyclic) { - index = mapping->writeback_index; - if (index) + writeback_index = mapping->writeback_index; + if (writeback_index) cycled = 0; - wbc->range_start = index << PAGE_CACHE_SHIFT; - wbc->range_end = LLONG_MAX; - wbc->range_cyclic = 0; - end = -1; + mpd.first_page = writeback_index; + mpd.last_page = -1; } else { - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; - } - - /* - * This works around two forms of stupidity. The first is in - * the writeback code, which caps the maximum number of pages - * written to be 1024 pages. This is wrong on multiple - * levels; different architectues have a different page size, - * which changes the maximum amount of data which gets - * written. Secondly, 4 megabytes is way too small. XFS - * forces this value to be 16 megabytes by multiplying - * nr_to_write parameter by four, and then relies on its - * allocator to allocate larger extents to make them - * contiguous. Unfortunately this brings us to the second - * stupidity, which is that ext4's mballoc code only allocates - * at most 2048 blocks. So we force contiguous writes up to - * the number of dirty blocks in the inode, or - * sbi->max_writeback_mb_bump whichever is smaller. - */ - max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT); - if (!range_cyclic && range_whole) { - if (wbc->nr_to_write == LONG_MAX) - desired_nr_to_write = wbc->nr_to_write; - else - desired_nr_to_write = wbc->nr_to_write * 8; - } else - desired_nr_to_write = ext4_num_dirty_pages(inode, index, - max_pages); - if (desired_nr_to_write > max_pages) - desired_nr_to_write = max_pages; - - if (wbc->nr_to_write < desired_nr_to_write) { - nr_to_writebump = desired_nr_to_write - wbc->nr_to_write; - wbc->nr_to_write = desired_nr_to_write; + mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT; + mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT; } + mpd.inode = inode; + mpd.wbc = wbc; + ext4_io_submit_init(&mpd.io_submit, wbc); retry: if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) - tag_pages_for_writeback(mapping, index, end); - + tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page); + done = false; blk_start_plug(&plug); - while (!ret && wbc->nr_to_write > 0) { + while (!done && mpd.first_page <= mpd.last_page) { + /* For each extent of pages we use new io_end */ + mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); + if (!mpd.io_submit.io_end) { + ret = -ENOMEM; + break; + } /* - * we insert one extent at a time. So we need - * credit needed for single extent allocation. - * journalled mode is currently not supported - * by delalloc + * We have two constraints: We find one extent to map and we + * must always write out whole page (makes a difference when + * blocksize < pagesize) so that we don't block on IO when we + * try to write out the rest of the page. Journalled mode is + * not supported by delalloc. 
*/ BUG_ON(ext4_should_journal_data(inode)); needed_blocks = ext4_da_writepages_trans_blocks(inode); - /* start a new transaction*/ - handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, - needed_blocks); + /* start a new transaction */ + handle = ext4_journal_start_with_reserve(inode, + EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks); if (IS_ERR(handle)) { ret = PTR_ERR(handle); ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " "%ld pages, ino %lu; err %d", __func__, wbc->nr_to_write, inode->i_ino, ret); - blk_finish_plug(&plug); - goto out_writepages; + /* Release allocated io_end */ + ext4_put_io_end(mpd.io_submit.io_end); + break; } - /* - * Now call write_cache_pages_da() to find the next - * contiguous region of logical blocks that need - * blocks to be allocated by ext4 and submit them. - */ - ret = write_cache_pages_da(handle, mapping, - wbc, &mpd, &done_index); - /* - * If we have a contiguous extent of pages and we - * haven't done the I/O yet, map the blocks and submit - * them for I/O. - */ - if (!mpd.io_done && mpd.next_page != mpd.first_page) { - mpage_da_map_and_submit(&mpd); - ret = MPAGE_DA_EXTENT_TAIL; + trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc); + ret = mpage_prepare_extent_to_map(&mpd); + if (!ret) { + if (mpd.map.m_len) + ret = mpage_map_and_submit_extent(handle, &mpd, + &give_up_on_write); + else { + /* + * We scanned the whole range (or exhausted + * nr_to_write), submitted what was mapped and + * didn't find anything needing mapping. We are + * done. + */ + done = true; + } } - trace_ext4_da_write_pages(inode, &mpd); - wbc->nr_to_write -= mpd.pages_written; - ext4_journal_stop(handle); - - if ((mpd.retval == -ENOSPC) && sbi->s_journal) { - /* commit the transaction which would + /* Submit prepared bio */ + ext4_io_submit(&mpd.io_submit); + /* Unlock pages we didn't use */ + mpage_release_unused_pages(&mpd, give_up_on_write); + /* Drop our io_end reference we got from init */ + ext4_put_io_end(mpd.io_submit.io_end); + + if (ret == -ENOSPC && sbi->s_journal) { + /* + * Commit the transaction which would * free blocks released in the transaction * and try again */ jbd2_journal_force_commit_nested(sbi->s_journal); ret = 0; - } else if (ret == MPAGE_DA_EXTENT_TAIL) { - /* - * Got one extent now try with rest of the pages. - * If mpd.retval is set -EIO, journal is aborted. - * So we don't need to write any more. - */ - pages_written += mpd.pages_written; - ret = mpd.retval; - io_done = 1; - } else if (wbc->nr_to_write) - /* - * There is no more writeout needed - * or we requested for a noblocking writeout - * and we found the device congested - */ + continue; + } + /* Fatal error - ENOMEM, EIO... 
*/ + if (ret) break; } blk_finish_plug(&plug); - if (!io_done && !cycled) { + if (!ret && !cycled) { cycled = 1; - index = 0; - wbc->range_start = index << PAGE_CACHE_SHIFT; - wbc->range_end = mapping->writeback_index - 1; + mpd.last_page = writeback_index - 1; + mpd.first_page = 0; goto retry; } /* Update index */ - wbc->range_cyclic = range_cyclic; if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) /* - * set the writeback_index so that range_cyclic + * Set the writeback_index so that range_cyclic * mode will write it back later */ - mapping->writeback_index = done_index; + mapping->writeback_index = mpd.first_page; out_writepages: - wbc->nr_to_write -= nr_to_writebump; - wbc->range_start = range_start; - trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); + trace_ext4_writepages_result(inode, wbc, ret, + nr_to_write - wbc->nr_to_write); return ret; } @@ -2829,7 +2761,8 @@ static int ext4_da_write_end(struct file *file, return ret ? ret : copied; } -static void ext4_da_invalidatepage(struct page *page, unsigned long offset) +static void ext4_da_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { /* * Drop reserved blocks @@ -2838,10 +2771,10 @@ static void ext4_da_invalidatepage(struct page *page, unsigned long offset) if (!page_has_buffers(page)) goto out; - ext4_da_page_release_reservation(page, offset); + ext4_da_page_release_reservation(page, offset, length); out: - ext4_invalidatepage(page, offset); + ext4_invalidatepage(page, offset, length); return; } @@ -2864,7 +2797,7 @@ int ext4_alloc_da_blocks(struct inode *inode) * laptop_mode, not even desirable). However, to do otherwise * would require replicating code paths in: * - * ext4_da_writepages() -> + * ext4_writepages() -> * write_cache_pages() ---> (via passed in callback function) * __mpage_da_writepage() --> * mpage_add_bh_to_extent() @@ -2989,37 +2922,40 @@ ext4_readpages(struct file *file, struct address_space *mapping, return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); } -static void ext4_invalidatepage(struct page *page, unsigned long offset) +static void ext4_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { - trace_ext4_invalidatepage(page, offset); + trace_ext4_invalidatepage(page, offset, length); /* No journalling happens on data buffers when this function is used */ WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page))); - block_invalidatepage(page, offset); + block_invalidatepage(page, offset, length); } static int __ext4_journalled_invalidatepage(struct page *page, - unsigned long offset) + unsigned int offset, + unsigned int length) { journal_t *journal = EXT4_JOURNAL(page->mapping->host); - trace_ext4_journalled_invalidatepage(page, offset); + trace_ext4_journalled_invalidatepage(page, offset, length); /* * If it's a full truncate we just forget about the pending dirtying */ - if (offset == 0) + if (offset == 0 && length == PAGE_CACHE_SIZE) ClearPageChecked(page); - return jbd2_journal_invalidatepage(journal, page, offset); + return jbd2_journal_invalidatepage(journal, page, offset, length); } /* Wrapper for aops... 
*/ static void ext4_journalled_invalidatepage(struct page *page, - unsigned long offset) + unsigned int offset, + unsigned int length) { - WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0); + WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0); } static int ext4_releasepage(struct page *page, gfp_t wait) @@ -3067,9 +3003,13 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, struct inode *inode = file_inode(iocb->ki_filp); ext4_io_end_t *io_end = iocb->private; - /* if not async direct IO or dio with 0 bytes write, just return */ - if (!io_end || !size) - goto out; + /* if not async direct IO just return */ + if (!io_end) { + inode_dio_done(inode); + if (is_async) + aio_complete(iocb, ret, 0); + return; + } ext_debug("ext4_end_io_dio(): io_end 0x%p " "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", @@ -3077,25 +3017,13 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, size); iocb->private = NULL; - - /* if not aio dio with unwritten extents, just free io and return */ - if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { - ext4_free_io_end(io_end); -out: - inode_dio_done(inode); - if (is_async) - aio_complete(iocb, ret, 0); - return; - } - io_end->offset = offset; io_end->size = size; if (is_async) { io_end->iocb = iocb; io_end->result = ret; } - - ext4_add_complete_io(io_end); + ext4_put_io_end_defer(io_end); } /* @@ -3129,6 +3057,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, get_block_t *get_block_func = NULL; int dio_flags = 0; loff_t final_size = offset + count; + ext4_io_end_t *io_end = NULL; /* Use the old path for reads and writes beyond i_size. */ if (rw != WRITE || final_size > inode->i_size) @@ -3136,11 +3065,18 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, BUG_ON(iocb->private == NULL); + /* + * Make all waiters for direct IO properly wait also for extent + * conversion. This also disallows race between truncate() and + * overwrite DIO as i_dio_count needs to be incremented under i_mutex. + */ + if (rw == WRITE) + atomic_inc(&inode->i_dio_count); + /* If we do a overwrite dio, i_mutex locking can be released */ overwrite = *((int *)iocb->private); if (overwrite) { - atomic_inc(&inode->i_dio_count); down_read(&EXT4_I(inode)->i_data_sem); mutex_unlock(&inode->i_mutex); } @@ -3167,13 +3103,16 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, iocb->private = NULL; ext4_inode_aio_set(inode, NULL); if (!is_sync_kiocb(iocb)) { - ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS); + io_end = ext4_init_io_end(inode, GFP_NOFS); if (!io_end) { ret = -ENOMEM; goto retake_lock; } io_end->flag |= EXT4_IO_END_DIRECT; - iocb->private = io_end; + /* + * Grab reference for DIO. Will be dropped in ext4_end_io_dio() + */ + iocb->private = ext4_get_io_end(io_end); /* * we save the io structure for current async direct * IO, so that later ext4_map_blocks() could flag the @@ -3197,33 +3136,42 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, NULL, dio_flags); - if (iocb->private) - ext4_inode_aio_set(inode, NULL); /* - * The io_end structure takes a reference to the inode, that - * structure needs to be destroyed and the reference to the - * inode need to be dropped, when IO is complete, even with 0 - * byte write, or failed. - * - * In the successful AIO DIO case, the io_end structure will - * be destroyed and the reference to the inode will be dropped - * after the end_io call back function is called. 
- * - * In the case there is 0 byte write, or error case, since VFS - * direct IO won't invoke the end_io call back function, we - * need to free the end_io structure here. + * Put our reference to io_end. This can free the io_end structure e.g. + * in sync IO case or in case of error. It can even perform extent + * conversion if all bios we submitted finished before we got here. + * Note that in that case iocb->private can be already set to NULL + * here. */ - if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) { - ext4_free_io_end(iocb->private); - iocb->private = NULL; - } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode, + if (io_end) { + ext4_inode_aio_set(inode, NULL); + ext4_put_io_end(io_end); + /* + * When no IO was submitted ext4_end_io_dio() was not + * called so we have to put iocb's reference. + */ + if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) { + WARN_ON(iocb->private != io_end); + WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN); + WARN_ON(io_end->iocb); + /* + * Generic code already did inode_dio_done() so we + * have to clear EXT4_IO_END_DIRECT to not do it for + * the second time. + */ + io_end->flag = 0; + ext4_put_io_end(io_end); + iocb->private = NULL; + } + } + if (ret > 0 && !overwrite && ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN)) { int err; /* * for non AIO case, since the IO is already * completed, we could do the conversion right here */ - err = ext4_convert_unwritten_extents(inode, + err = ext4_convert_unwritten_extents(NULL, inode, offset, ret); if (err < 0) ret = err; @@ -3231,9 +3179,10 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, } retake_lock: + if (rw == WRITE) + inode_dio_done(inode); /* take i_mutex locking again if we do a ovewrite dio */ if (overwrite) { - inode_dio_done(inode); up_read(&EXT4_I(inode)->i_data_sem); mutex_lock(&inode->i_mutex); } @@ -3292,6 +3241,7 @@ static const struct address_space_operations ext4_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, + .writepages = ext4_writepages, .write_begin = ext4_write_begin, .write_end = ext4_write_end, .bmap = ext4_bmap, @@ -3307,6 +3257,7 @@ static const struct address_space_operations ext4_journalled_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, + .writepages = ext4_writepages, .write_begin = ext4_write_begin, .write_end = ext4_journalled_write_end, .set_page_dirty = ext4_journalled_set_page_dirty, @@ -3322,7 +3273,7 @@ static const struct address_space_operations ext4_da_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, - .writepages = ext4_da_writepages, + .writepages = ext4_writepages, .write_begin = ext4_da_write_begin, .write_end = ext4_da_write_end, .bmap = ext4_bmap, @@ -3355,89 +3306,56 @@ void ext4_set_aops(struct inode *inode) inode->i_mapping->a_ops = &ext4_aops; } - /* - * ext4_discard_partial_page_buffers() - * Wrapper function for ext4_discard_partial_page_buffers_no_lock. - * This function finds and locks the page containing the offset - * "from" and passes it to ext4_discard_partial_page_buffers_no_lock. - * Calling functions that already have the page locked should call - * ext4_discard_partial_page_buffers_no_lock directly. + * ext4_block_truncate_page() zeroes out a mapping from file offset `from' + * up to the end of the block which corresponds to `from'. + * This required during truncate. 
We need to physically zero the tail end + * of that block so it doesn't yield old data if the file is later grown. */ -int ext4_discard_partial_page_buffers(handle_t *handle, - struct address_space *mapping, loff_t from, - loff_t length, int flags) +int ext4_block_truncate_page(handle_t *handle, + struct address_space *mapping, loff_t from) { + unsigned offset = from & (PAGE_CACHE_SIZE-1); + unsigned length; + unsigned blocksize; struct inode *inode = mapping->host; - struct page *page; - int err = 0; - page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, - mapping_gfp_mask(mapping) & ~__GFP_FS); - if (!page) - return -ENOMEM; - - err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page, - from, length, flags); + blocksize = inode->i_sb->s_blocksize; + length = blocksize - (offset & (blocksize - 1)); - unlock_page(page); - page_cache_release(page); - return err; + return ext4_block_zero_page_range(handle, mapping, from, length); } /* - * ext4_discard_partial_page_buffers_no_lock() - * Zeros a page range of length 'length' starting from offset 'from'. - * Buffer heads that correspond to the block aligned regions of the - * zeroed range will be unmapped. Unblock aligned regions - * will have the corresponding buffer head mapped if needed so that - * that region of the page can be updated with the partial zero out. - * - * This function assumes that the page has already been locked. The - * The range to be discarded must be contained with in the given page. - * If the specified range exceeds the end of the page it will be shortened - * to the end of the page that corresponds to 'from'. This function is - * appropriate for updating a page and it buffer heads to be unmapped and - * zeroed for blocks that have been either released, or are going to be - * released. - * - * handle: The journal handle - * inode: The files inode - * page: A locked page that contains the offset "from" - * from: The starting byte offset (from the beginning of the file) - * to begin discarding - * len: The length of bytes to discard - * flags: Optional flags that may be used: - * - * EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED - * Only zero the regions of the page whose buffer heads - * have already been unmapped. This flag is appropriate - * for updating the contents of a page whose blocks may - * have already been released, and we only want to zero - * out the regions that correspond to those released blocks. - * - * Returns zero on success or negative on failure. + * ext4_block_zero_page_range() zeros out a mapping of length 'length' + * starting from file offset 'from'. The range to be zero'd must + * be contained with in one block. 
If the specified range exceeds + * the end of the block it will be shortened to end of the block + * that cooresponds to 'from' */ -static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle, - struct inode *inode, struct page *page, loff_t from, - loff_t length, int flags) +int ext4_block_zero_page_range(handle_t *handle, + struct address_space *mapping, loff_t from, loff_t length) { ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; - unsigned int offset = from & (PAGE_CACHE_SIZE-1); - unsigned int blocksize, max, pos; + unsigned offset = from & (PAGE_CACHE_SIZE-1); + unsigned blocksize, max, pos; ext4_lblk_t iblock; + struct inode *inode = mapping->host; struct buffer_head *bh; + struct page *page; int err = 0; - blocksize = inode->i_sb->s_blocksize; - max = PAGE_CACHE_SIZE - offset; + page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, + mapping_gfp_mask(mapping) & ~__GFP_FS); + if (!page) + return -ENOMEM; - if (index != page->index) - return -EINVAL; + blocksize = inode->i_sb->s_blocksize; + max = blocksize - (offset & (blocksize - 1)); /* * correct length if it does not fall between - * 'from' and the end of the page + * 'from' and the end of the block */ if (length > max || length < 0) length = max; @@ -3455,106 +3373,91 @@ static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle, iblock++; pos += blocksize; } - - pos = offset; - while (pos < offset + length) { - unsigned int end_of_block, range_to_discard; - - err = 0; - - /* The length of space left to zero and unmap */ - range_to_discard = offset + length - pos; - - /* The length of space until the end of the block */ - end_of_block = blocksize - (pos & (blocksize-1)); - - /* - * Do not unmap or zero past end of block - * for this buffer head - */ - if (range_to_discard > end_of_block) - range_to_discard = end_of_block; - - - /* - * Skip this buffer head if we are only zeroing unampped - * regions of the page - */ - if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED && - buffer_mapped(bh)) - goto next; - - /* If the range is block aligned, unmap */ - if (range_to_discard == blocksize) { - clear_buffer_dirty(bh); - bh->b_bdev = NULL; - clear_buffer_mapped(bh); - clear_buffer_req(bh); - clear_buffer_new(bh); - clear_buffer_delay(bh); - clear_buffer_unwritten(bh); - clear_buffer_uptodate(bh); - zero_user(page, pos, range_to_discard); - BUFFER_TRACE(bh, "Buffer discarded"); - goto next; - } - - /* - * If this block is not completely contained in the range - * to be discarded, then it is not going to be released. Because - * we need to keep this block, we need to make sure this part - * of the page is uptodate before we modify it by writeing - * partial zeros on it. - */ + if (buffer_freed(bh)) { + BUFFER_TRACE(bh, "freed: skip"); + goto unlock; + } + if (!buffer_mapped(bh)) { + BUFFER_TRACE(bh, "unmapped"); + ext4_get_block(inode, iblock, bh, 0); + /* unmapped? It's a hole - nothing to do */ if (!buffer_mapped(bh)) { - /* - * Buffer head must be mapped before we can read - * from the block - */ - BUFFER_TRACE(bh, "unmapped"); - ext4_get_block(inode, iblock, bh, 0); - /* unmapped? It's a hole - nothing to do */ - if (!buffer_mapped(bh)) { - BUFFER_TRACE(bh, "still unmapped"); - goto next; - } + BUFFER_TRACE(bh, "still unmapped"); + goto unlock; } + } - /* Ok, it's mapped. Make sure it's up-to-date */ - if (PageUptodate(page)) - set_buffer_uptodate(bh); + /* Ok, it's mapped. 
Make sure it's up-to-date */ + if (PageUptodate(page)) + set_buffer_uptodate(bh); - if (!buffer_uptodate(bh)) { - err = -EIO; - ll_rw_block(READ, 1, &bh); - wait_on_buffer(bh); - /* Uhhuh. Read error. Complain and punt.*/ - if (!buffer_uptodate(bh)) - goto next; - } + if (!buffer_uptodate(bh)) { + err = -EIO; + ll_rw_block(READ, 1, &bh); + wait_on_buffer(bh); + /* Uhhuh. Read error. Complain and punt. */ + if (!buffer_uptodate(bh)) + goto unlock; + } + if (ext4_should_journal_data(inode)) { + BUFFER_TRACE(bh, "get write access"); + err = ext4_journal_get_write_access(handle, bh); + if (err) + goto unlock; + } + zero_user(page, offset, length); + BUFFER_TRACE(bh, "zeroed end of block"); - if (ext4_should_journal_data(inode)) { - BUFFER_TRACE(bh, "get write access"); - err = ext4_journal_get_write_access(handle, bh); - if (err) - goto next; - } + if (ext4_should_journal_data(inode)) { + err = ext4_handle_dirty_metadata(handle, inode, bh); + } else { + err = 0; + mark_buffer_dirty(bh); + if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) + err = ext4_jbd2_file_inode(handle, inode); + } + +unlock: + unlock_page(page); + page_cache_release(page); + return err; +} - zero_user(page, pos, range_to_discard); +int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, + loff_t lstart, loff_t length) +{ + struct super_block *sb = inode->i_sb; + struct address_space *mapping = inode->i_mapping; + unsigned partial_start, partial_end; + ext4_fsblk_t start, end; + loff_t byte_end = (lstart + length - 1); + int err = 0; - err = 0; - if (ext4_should_journal_data(inode)) { - err = ext4_handle_dirty_metadata(handle, inode, bh); - } else - mark_buffer_dirty(bh); + partial_start = lstart & (sb->s_blocksize - 1); + partial_end = byte_end & (sb->s_blocksize - 1); - BUFFER_TRACE(bh, "Partial buffer zeroed"); -next: - bh = bh->b_this_page; - iblock++; - pos += range_to_discard; - } + start = lstart >> sb->s_blocksize_bits; + end = byte_end >> sb->s_blocksize_bits; + /* Handle partial zero within the single block */ + if (start == end && + (partial_start || (partial_end != sb->s_blocksize - 1))) { + err = ext4_block_zero_page_range(handle, mapping, + lstart, length); + return err; + } + /* Handle partial zero out on the start of the range */ + if (partial_start) { + err = ext4_block_zero_page_range(handle, mapping, + lstart, sb->s_blocksize); + if (err) + return err; + } + /* Handle partial zero out on the end of the range */ + if (partial_end != sb->s_blocksize - 1) + err = ext4_block_zero_page_range(handle, mapping, + byte_end - partial_end, + partial_end + 1); return err; } @@ -3580,14 +3483,12 @@ int ext4_can_truncate(struct inode *inode) * Returns: 0 on success or negative on failure */ -int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) +int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) { - struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; ext4_lblk_t first_block, stop_block; struct address_space *mapping = inode->i_mapping; - loff_t first_page, last_page, page_len; - loff_t first_page_offset, last_page_offset; + loff_t first_block_offset, last_block_offset; handle_t *handle; unsigned int credits; int ret = 0; @@ -3638,23 +3539,16 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) offset; } - first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - last_page = (offset + length) >> PAGE_CACHE_SHIFT; + first_block_offset = round_up(offset, sb->s_blocksize); + last_block_offset = round_down((offset 
+ length), sb->s_blocksize) - 1; - first_page_offset = first_page << PAGE_CACHE_SHIFT; - last_page_offset = last_page << PAGE_CACHE_SHIFT; - - /* Now release the pages */ - if (last_page_offset > first_page_offset) { - truncate_pagecache_range(inode, first_page_offset, - last_page_offset - 1); - } + /* Now release the pages and zero block aligned part of pages*/ + if (last_block_offset > first_block_offset) + truncate_pagecache_range(inode, first_block_offset, + last_block_offset); /* Wait all existing dio workers, newcomers will block on i_mutex */ ext4_inode_block_unlocked_dio(inode); - ret = ext4_flush_unwritten_io(inode); - if (ret) - goto out_dio; inode_dio_wait(inode); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) @@ -3668,66 +3562,10 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) goto out_dio; } - /* - * Now we need to zero out the non-page-aligned data in the - * pages at the start and tail of the hole, and unmap the - * buffer heads for the block aligned regions of the page that - * were completely zeroed. - */ - if (first_page > last_page) { - /* - * If the file space being truncated is contained - * within a page just zero out and unmap the middle of - * that page - */ - ret = ext4_discard_partial_page_buffers(handle, - mapping, offset, length, 0); - - if (ret) - goto out_stop; - } else { - /* - * zero out and unmap the partial page that contains - * the start of the hole - */ - page_len = first_page_offset - offset; - if (page_len > 0) { - ret = ext4_discard_partial_page_buffers(handle, mapping, - offset, page_len, 0); - if (ret) - goto out_stop; - } - - /* - * zero out and unmap the partial page that contains - * the end of the hole - */ - page_len = offset + length - last_page_offset; - if (page_len > 0) { - ret = ext4_discard_partial_page_buffers(handle, mapping, - last_page_offset, page_len, 0); - if (ret) - goto out_stop; - } - } - - /* - * If i_size is contained in the last page, we need to - * unmap and zero the partial page after i_size - */ - if (inode->i_size >> PAGE_CACHE_SHIFT == last_page && - inode->i_size % PAGE_CACHE_SIZE != 0) { - page_len = PAGE_CACHE_SIZE - - (inode->i_size & (PAGE_CACHE_SIZE - 1)); - - if (page_len > 0) { - ret = ext4_discard_partial_page_buffers(handle, - mapping, inode->i_size, page_len, 0); - - if (ret) - goto out_stop; - } - } + ret = ext4_zero_partial_blocks(handle, inode, offset, + length); + if (ret) + goto out_stop; first_block = (offset + sb->s_blocksize - 1) >> EXT4_BLOCK_SIZE_BITS(sb); @@ -3803,7 +3641,6 @@ void ext4_truncate(struct inode *inode) unsigned int credits; handle_t *handle; struct address_space *mapping = inode->i_mapping; - loff_t page_len; /* * There is a possibility that we're either freeing the inode @@ -3830,12 +3667,6 @@ void ext4_truncate(struct inode *inode) return; } - /* - * finish any pending end_io work so we won't run the risk of - * converting any truncated blocks to initialized later - */ - ext4_flush_unwritten_io(inode); - if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) credits = ext4_writepage_trans_blocks(inode); else @@ -3847,14 +3678,8 @@ void ext4_truncate(struct inode *inode) return; } - if (inode->i_size % PAGE_CACHE_SIZE != 0) { - page_len = PAGE_CACHE_SIZE - - (inode->i_size & (PAGE_CACHE_SIZE - 1)); - - if (ext4_discard_partial_page_buffers(handle, - mapping, inode->i_size, page_len, 0)) - goto out_stop; - } + if (inode->i_size & (inode->i_sb->s_blocksize - 1)) + ext4_block_truncate_page(handle, mapping, inode->i_size); /* * We add the inode to the orphan 
list, so that if this @@ -4623,7 +4448,8 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode) inode->i_size >> PAGE_CACHE_SHIFT); if (!page) return; - ret = __ext4_journalled_invalidatepage(page, offset); + ret = __ext4_journalled_invalidatepage(page, offset, + PAGE_CACHE_SIZE - offset); unlock_page(page); page_cache_release(page); if (ret != -EBUSY) @@ -4805,7 +4631,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode; - unsigned long delalloc_blocks; + unsigned long long delalloc_blocks; inode = dentry->d_inode; generic_fillattr(inode, stat); @@ -4823,15 +4649,16 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb), EXT4_I(inode)->i_reserved_data_blocks); - stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; + stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits-9); return 0; } -static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) +static int ext4_index_trans_blocks(struct inode *inode, int lblocks, + int pextents) { if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) - return ext4_ind_trans_blocks(inode, nrblocks, chunk); - return ext4_ext_index_trans_blocks(inode, nrblocks, chunk); + return ext4_ind_trans_blocks(inode, lblocks); + return ext4_ext_index_trans_blocks(inode, pextents); } /* @@ -4845,7 +4672,8 @@ static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) * * Also account for superblock, inode, quota and xattr blocks */ -static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) +static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, + int pextents) { ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); int gdpblocks; @@ -4853,14 +4681,10 @@ static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) int ret = 0; /* - * How many index blocks need to touch to modify nrblocks? - * The "Chunk" flag indicating whether the nrblocks is - * physically contiguous on disk - * - * For Direct IO and fallocate, they calls get_block to allocate - * one single extent at a time, so they could set the "Chunk" flag + * How many index blocks need to touch to map @lblocks logical blocks + * to @pextents physical extents? */ - idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk); + idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents); ret = idxblocks; @@ -4868,12 +4692,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) * Now let's see how many group bitmaps and group descriptors need * to account */ - groups = idxblocks; - if (chunk) - groups += 1; - else - groups += nrblocks; - + groups = idxblocks + pextents; gdpblocks = groups; if (groups > ngroups) groups = ngroups; @@ -4904,7 +4723,7 @@ int ext4_writepage_trans_blocks(struct inode *inode) int bpp = ext4_journal_blocks_per_page(inode); int ret; - ret = ext4_meta_trans_blocks(inode, bpp, 0); + ret = ext4_meta_trans_blocks(inode, bpp, bpp); /* Account for data blocks for journalled mode */ if (ext4_should_journal_data(inode)) diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index def84082a9a9..a9ff5e5137ca 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2105,6 +2105,7 @@ repeat: group = ac->ac_g_ex.fe_group; for (i = 0; i < ngroups; group++, i++) { + cond_resched(); /* * Artificially restricted ngroups for non-extent * files makes group > ngroups possible on first loop. 
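The hole-punching rework above is easiest to see with concrete numbers: ext4_punch_hole() now uses round_up()/round_down() to find the block-aligned interior of the hole, drops page cache and frees blocks only over that interior, and leaves the misaligned head and tail to ext4_zero_partial_blocks(). The following stand-alone C program is an editor's illustration of that arithmetic, not code from the patch; it assumes a 4096-byte block size and a hole that spans at least one full block.

#include <stdio.h>

#define BLKSZ 4096ULL

static unsigned long long rnd_up(unsigned long long x)
{
	return (x + BLKSZ - 1) & ~(BLKSZ - 1);
}

static unsigned long long rnd_down(unsigned long long x)
{
	return x & ~(BLKSZ - 1);
}

int main(void)
{
	unsigned long long offset = 1000, length = 10000;
	unsigned long long first = rnd_up(offset);               /* 4096 */
	unsigned long long last = rnd_down(offset + length) - 1; /* 8191 */

	/* Mirrors the truncate_pagecache_range() guard in the hunk above */
	if (last > first)
		printf("drop page cache and free blocks over [%llu, %llu]\n",
		       first, last);
	/* The partial head and tail are only zeroed in place */
	printf("zero [%llu, %llu) and [%llu, %llu) in place\n",
	       offset, first, last + 1, offset + length);
	return 0;
}

For offset 1000 and length 10000 this frees the blocks covering [4096, 8191] and zeroes [1000, 4096) and [8192, 11000), which is exactly the split between the truncate_pagecache_range() call and ext4_zero_partial_blocks() in the hunk above.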
@@ -4405,17 +4406,20 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, repeat: /* allocate space in core */ *errp = ext4_mb_regular_allocator(ac); - if (*errp) { - ext4_discard_allocated_blocks(ac); - goto errout; - } + if (*errp) + goto discard_and_exit; /* as we've just preallocated more space than - * user requested orinally, we store allocated + * user requested originally, we store allocated * space in a special descriptor */ if (ac->ac_status == AC_STATUS_FOUND && - ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) - ext4_mb_new_preallocation(ac); + ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) + *errp = ext4_mb_new_preallocation(ac); + if (*errp) { + discard_and_exit: + ext4_discard_allocated_blocks(ac); + goto errout; + } } if (likely(ac->ac_status == AC_STATUS_FOUND)) { *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); @@ -4612,10 +4616,11 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, BUG_ON(bh && (count > 1)); for (i = 0; i < count; i++) { + cond_resched(); if (!bh) tbh = sb_find_get_block(inode->i_sb, block + i); - if (unlikely(!tbh)) + if (!tbh) continue; ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, inode, tbh, block + i); diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 3dcbf364022f..e86dddbd8296 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -912,7 +912,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, struct page *pagep[2] = {NULL, NULL}; handle_t *handle; ext4_lblk_t orig_blk_offset; - long long offs = orig_page_offset << PAGE_CACHE_SHIFT; unsigned long blocksize = orig_inode->i_sb->s_blocksize; unsigned int w_flags = 0; unsigned int tmp_data_size, data_size, replaced_size; @@ -940,8 +939,6 @@ again: orig_blk_offset = orig_page_offset * blocks_per_page + data_offset_in_page; - offs = (long long)orig_blk_offset << orig_inode->i_blkbits; - /* Calculate data_size */ if ((orig_blk_offset + block_len_in_page - 1) == ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) { diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 6653fc35ecb7..234b834d5a97 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -918,11 +918,8 @@ static int htree_dirblock_to_tree(struct file *dir_file, bh->b_data, bh->b_size, (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb)) + ((char *)de - bh->b_data))) { - /* On error, skip the f_pos to the next block. 
*/ - dir_file->f_pos = (dir_file->f_pos | - (dir->i_sb->s_blocksize - 1)) + 1; - brelse(bh); - return count; + /* silently ignore the rest of the block */ + break; } ext4fs_dirhash(de->name, de->name_len, hinfo); if ((hinfo->hash < start_hash) || @@ -2299,6 +2296,45 @@ retry: return err; } +static int ext4_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) +{ + handle_t *handle; + struct inode *inode; + int err, retries = 0; + + dquot_initialize(dir); + +retry: + inode = ext4_new_inode_start_handle(dir, mode, + NULL, 0, NULL, + EXT4_HT_DIR, + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) + + 4 + EXT4_XATTR_TRANS_BLOCKS); + handle = ext4_journal_current_handle(); + err = PTR_ERR(inode); + if (!IS_ERR(inode)) { + inode->i_op = &ext4_file_inode_operations; + inode->i_fop = &ext4_file_operations; + ext4_set_aops(inode); + err = ext4_orphan_add(handle, inode); + if (err) + goto err_drop_inode; + mark_inode_dirty(inode); + d_tmpfile(dentry, inode); + unlock_new_inode(inode); + } + if (handle) + ext4_journal_stop(handle); + if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) + goto retry; + return err; +err_drop_inode: + ext4_journal_stop(handle); + unlock_new_inode(inode); + iput(inode); + return err; +} + struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode, struct ext4_dir_entry_2 *de, int blocksize, int csum_size, @@ -2906,7 +2942,7 @@ static int ext4_link(struct dentry *old_dentry, retry: handle = ext4_journal_start(dir, EXT4_HT_DIR, (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + - EXT4_INDEX_EXTRA_TRANS_BLOCKS)); + EXT4_INDEX_EXTRA_TRANS_BLOCKS) + 1); if (IS_ERR(handle)) return PTR_ERR(handle); @@ -2920,6 +2956,11 @@ retry: err = ext4_add_entry(handle, dentry, inode); if (!err) { ext4_mark_inode_dirty(handle, inode); + /* this can happen only for tmpfile being + * linked the first time + */ + if (inode->i_nlink == 1) + ext4_orphan_del(handle, inode); d_instantiate(dentry, inode); } else { drop_nlink(inode); @@ -3172,6 +3213,7 @@ const struct inode_operations ext4_dir_inode_operations = { .mkdir = ext4_mkdir, .rmdir = ext4_rmdir, .mknod = ext4_mknod, + .tmpfile = ext4_tmpfile, .rename = ext4_rename, .setattr = ext4_setattr, .setxattr = generic_setxattr, diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 4acf1f78881b..48786cdb5e6c 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -46,46 +46,121 @@ void ext4_exit_pageio(void) } /* - * This function is called by ext4_evict_inode() to make sure there is - * no more pending I/O completion work left to do. + * Print an buffer I/O error compatible with the fs/buffer.c. This + * provides compatibility with dmesg scrapers that look for a specific + * buffer I/O error message. We really need a unified error reporting + * structure to userspace ala Digital Unix's uerf system, but it's + * probably not going to happen in my lifetime, due to LKML politics... */ -void ext4_ioend_shutdown(struct inode *inode) +static void buffer_io_error(struct buffer_head *bh) { - wait_queue_head_t *wq = ext4_ioend_wq(inode); + char b[BDEVNAME_SIZE]; + printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n", + bdevname(bh->b_bdev, b), + (unsigned long long)bh->b_blocknr); +} - wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0)); - /* - * We need to make sure the work structure is finished being - * used before we let the inode get destroyed. 
- */ - if (work_pending(&EXT4_I(inode)->i_unwritten_work)) - cancel_work_sync(&EXT4_I(inode)->i_unwritten_work); +static void ext4_finish_bio(struct bio *bio) +{ + int i; + int error = !test_bit(BIO_UPTODATE, &bio->bi_flags); + + for (i = 0; i < bio->bi_vcnt; i++) { + struct bio_vec *bvec = &bio->bi_io_vec[i]; + struct page *page = bvec->bv_page; + struct buffer_head *bh, *head; + unsigned bio_start = bvec->bv_offset; + unsigned bio_end = bio_start + bvec->bv_len; + unsigned under_io = 0; + unsigned long flags; + + if (!page) + continue; + + if (error) { + SetPageError(page); + set_bit(AS_EIO, &page->mapping->flags); + } + bh = head = page_buffers(page); + /* + * We check all buffers in the page under BH_Uptodate_Lock + * to avoid races with other end io clearing async_write flags + */ + local_irq_save(flags); + bit_spin_lock(BH_Uptodate_Lock, &head->b_state); + do { + if (bh_offset(bh) < bio_start || + bh_offset(bh) + bh->b_size > bio_end) { + if (buffer_async_write(bh)) + under_io++; + continue; + } + clear_buffer_async_write(bh); + if (error) + buffer_io_error(bh); + } while ((bh = bh->b_this_page) != head); + bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); + local_irq_restore(flags); + if (!under_io) + end_page_writeback(page); + } } -void ext4_free_io_end(ext4_io_end_t *io) +static void ext4_release_io_end(ext4_io_end_t *io_end) { - BUG_ON(!io); - BUG_ON(!list_empty(&io->list)); - BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN); + struct bio *bio, *next_bio; + + BUG_ON(!list_empty(&io_end->list)); + BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN); + WARN_ON(io_end->handle); - if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count)) - wake_up_all(ext4_ioend_wq(io->inode)); - kmem_cache_free(io_end_cachep, io); + if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count)) + wake_up_all(ext4_ioend_wq(io_end->inode)); + + for (bio = io_end->bio; bio; bio = next_bio) { + next_bio = bio->bi_private; + ext4_finish_bio(bio); + bio_put(bio); + } + if (io_end->flag & EXT4_IO_END_DIRECT) + inode_dio_done(io_end->inode); + if (io_end->iocb) + aio_complete(io_end->iocb, io_end->result, 0); + kmem_cache_free(io_end_cachep, io_end); } -/* check a range of space and convert unwritten extents to written. */ +static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end) +{ + struct inode *inode = io_end->inode; + + io_end->flag &= ~EXT4_IO_END_UNWRITTEN; + /* Wake up anyone waiting on unwritten extent conversion */ + if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten)) + wake_up_all(ext4_ioend_wq(inode)); +} + +/* + * Check a range of space and convert unwritten extents to written. Note that + * we are protected from truncate touching same part of extent tree by the + * fact that truncate code waits for all DIO to finish (thus exclusion from + * direct IO is achieved) and also waits for PageWriteback bits. Thus we + * cannot get to ext4_ext_truncate() before all IOs overlapping that range are + * completed (happens from ext4_free_ioend()). 
+ */ static int ext4_end_io(ext4_io_end_t *io) { struct inode *inode = io->inode; loff_t offset = io->offset; ssize_t size = io->size; + handle_t *handle = io->handle; int ret = 0; ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p," "list->prev 0x%p\n", io, inode->i_ino, io->list.next, io->list.prev); - ret = ext4_convert_unwritten_extents(inode, offset, size); + io->handle = NULL; /* Following call will use up the handle */ + ret = ext4_convert_unwritten_extents(handle, inode, offset, size); if (ret < 0) { ext4_msg(inode->i_sb, KERN_EMERG, "failed to convert unwritten extents to written " @@ -93,30 +168,22 @@ static int ext4_end_io(ext4_io_end_t *io) "(inode %lu, offset %llu, size %zd, error %d)", inode->i_ino, offset, size, ret); } - /* Wake up anyone waiting on unwritten extent conversion */ - if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten)) - wake_up_all(ext4_ioend_wq(inode)); - if (io->flag & EXT4_IO_END_DIRECT) - inode_dio_done(inode); - if (io->iocb) - aio_complete(io->iocb, io->result, 0); + ext4_clear_io_unwritten_flag(io); + ext4_release_io_end(io); return ret; } -static void dump_completed_IO(struct inode *inode) +static void dump_completed_IO(struct inode *inode, struct list_head *head) { #ifdef EXT4FS_DEBUG struct list_head *cur, *before, *after; ext4_io_end_t *io, *io0, *io1; - if (list_empty(&EXT4_I(inode)->i_completed_io_list)) { - ext4_debug("inode %lu completed_io list is empty\n", - inode->i_ino); + if (list_empty(head)) return; - } - ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino); - list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) { + ext4_debug("Dump inode %lu completed io list\n", inode->i_ino); + list_for_each_entry(io, head, list) { cur = &io->list; before = cur->prev; io0 = container_of(before, ext4_io_end_t, list); @@ -130,23 +197,30 @@ static void dump_completed_IO(struct inode *inode) } /* Add the io_end to per-inode completed end_io list. 
*/ -void ext4_add_complete_io(ext4_io_end_t *io_end) +static void ext4_add_complete_io(ext4_io_end_t *io_end) { struct ext4_inode_info *ei = EXT4_I(io_end->inode); struct workqueue_struct *wq; unsigned long flags; BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN)); - wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq; - spin_lock_irqsave(&ei->i_completed_io_lock, flags); - if (list_empty(&ei->i_completed_io_list)) - queue_work(wq, &ei->i_unwritten_work); - list_add_tail(&io_end->list, &ei->i_completed_io_list); + if (io_end->handle) { + wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq; + if (list_empty(&ei->i_rsv_conversion_list)) + queue_work(wq, &ei->i_rsv_conversion_work); + list_add_tail(&io_end->list, &ei->i_rsv_conversion_list); + } else { + wq = EXT4_SB(io_end->inode->i_sb)->unrsv_conversion_wq; + if (list_empty(&ei->i_unrsv_conversion_list)) + queue_work(wq, &ei->i_unrsv_conversion_work); + list_add_tail(&io_end->list, &ei->i_unrsv_conversion_list); + } spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); } -static int ext4_do_flush_completed_IO(struct inode *inode) +static int ext4_do_flush_completed_IO(struct inode *inode, + struct list_head *head) { ext4_io_end_t *io; struct list_head unwritten; @@ -155,8 +229,8 @@ static int ext4_do_flush_completed_IO(struct inode *inode) int err, ret = 0; spin_lock_irqsave(&ei->i_completed_io_lock, flags); - dump_completed_IO(inode); - list_replace_init(&ei->i_completed_io_list, &unwritten); + dump_completed_IO(inode, head); + list_replace_init(head, &unwritten); spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); while (!list_empty(&unwritten)) { @@ -167,30 +241,25 @@ static int ext4_do_flush_completed_IO(struct inode *inode) err = ext4_end_io(io); if (unlikely(!ret && err)) ret = err; - io->flag &= ~EXT4_IO_END_UNWRITTEN; - ext4_free_io_end(io); } return ret; } /* - * work on completed aio dio IO, to convert unwritten extents to extents + * work on completed IO, to convert unwritten extents to extents */ -void ext4_end_io_work(struct work_struct *work) +void ext4_end_io_rsv_work(struct work_struct *work) { struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info, - i_unwritten_work); - ext4_do_flush_completed_IO(&ei->vfs_inode); + i_rsv_conversion_work); + ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list); } -int ext4_flush_unwritten_io(struct inode *inode) +void ext4_end_io_unrsv_work(struct work_struct *work) { - int ret; - WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) && - !(inode->i_state & I_FREEING)); - ret = ext4_do_flush_completed_IO(inode); - ext4_unwritten_wait(inode); - return ret; + struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info, + i_unrsv_conversion_work); + ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_unrsv_conversion_list); } ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags) @@ -200,83 +269,70 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags) atomic_inc(&EXT4_I(inode)->i_ioend_count); io->inode = inode; INIT_LIST_HEAD(&io->list); + atomic_set(&io->count, 1); } return io; } -/* - * Print an buffer I/O error compatible with the fs/buffer.c. This - * provides compatibility with dmesg scrapers that look for a specific - * buffer I/O error message. We really need a unified error reporting - * structure to userspace ala Digital Unix's uerf system, but it's - * probably not going to happen in my lifetime, due to LKML politics... 
- */ -static void buffer_io_error(struct buffer_head *bh) +void ext4_put_io_end_defer(ext4_io_end_t *io_end) { - char b[BDEVNAME_SIZE]; - printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n", - bdevname(bh->b_bdev, b), - (unsigned long long)bh->b_blocknr); + if (atomic_dec_and_test(&io_end->count)) { + if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) { + ext4_release_io_end(io_end); + return; + } + ext4_add_complete_io(io_end); + } +} + +int ext4_put_io_end(ext4_io_end_t *io_end) +{ + int err = 0; + + if (atomic_dec_and_test(&io_end->count)) { + if (io_end->flag & EXT4_IO_END_UNWRITTEN) { + err = ext4_convert_unwritten_extents(io_end->handle, + io_end->inode, io_end->offset, + io_end->size); + io_end->handle = NULL; + ext4_clear_io_unwritten_flag(io_end); + } + ext4_release_io_end(io_end); + } + return err; +} + +ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end) +{ + atomic_inc(&io_end->count); + return io_end; } static void ext4_end_bio(struct bio *bio, int error) { ext4_io_end_t *io_end = bio->bi_private; - struct inode *inode; - int i; - int blocksize; sector_t bi_sector = bio->bi_sector; BUG_ON(!io_end); - inode = io_end->inode; - blocksize = 1 << inode->i_blkbits; - bio->bi_private = NULL; bio->bi_end_io = NULL; if (test_bit(BIO_UPTODATE, &bio->bi_flags)) error = 0; - for (i = 0; i < bio->bi_vcnt; i++) { - struct bio_vec *bvec = &bio->bi_io_vec[i]; - struct page *page = bvec->bv_page; - struct buffer_head *bh, *head; - unsigned bio_start = bvec->bv_offset; - unsigned bio_end = bio_start + bvec->bv_len; - unsigned under_io = 0; - unsigned long flags; - if (!page) - continue; - - if (error) { - SetPageError(page); - set_bit(AS_EIO, &page->mapping->flags); - } - bh = head = page_buffers(page); + if (io_end->flag & EXT4_IO_END_UNWRITTEN) { /* - * We check all buffers in the page under BH_Uptodate_Lock - * to avoid races with other end io clearing async_write flags + * Link bio into list hanging from io_end. We have to do it + * atomically as bio completions can be racing against each + * other. */ - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &head->b_state); - do { - if (bh_offset(bh) < bio_start || - bh_offset(bh) + blocksize > bio_end) { - if (buffer_async_write(bh)) - under_io++; - continue; - } - clear_buffer_async_write(bh); - if (error) - buffer_io_error(bh); - } while ((bh = bh->b_this_page) != head); - bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); - local_irq_restore(flags); - if (!under_io) - end_page_writeback(page); + bio->bi_private = xchg(&io_end->bio, bio); + } else { + ext4_finish_bio(bio); + bio_put(bio); } - bio_put(bio); if (error) { - io_end->flag |= EXT4_IO_END_ERROR; + struct inode *inode = io_end->inode; + ext4_warning(inode->i_sb, "I/O error writing to inode %lu " "(offset %llu size %ld starting block %llu)", inode->i_ino, @@ -285,13 +341,7 @@ static void ext4_end_bio(struct bio *bio, int error) (unsigned long long) bi_sector >> (inode->i_blkbits - 9)); } - - if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { - ext4_free_io_end(io_end); - return; - } - - ext4_add_complete_io(io_end); + ext4_put_io_end_defer(io_end); } void ext4_io_submit(struct ext4_io_submit *io) @@ -305,43 +355,38 @@ void ext4_io_submit(struct ext4_io_submit *io) bio_put(io->io_bio); } io->io_bio = NULL; - io->io_op = 0; +} + +void ext4_io_submit_init(struct ext4_io_submit *io, + struct writeback_control *wbc) +{ + io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? 
WRITE_SYNC : WRITE); + io->io_bio = NULL; io->io_end = NULL; } -static int io_submit_init(struct ext4_io_submit *io, - struct inode *inode, - struct writeback_control *wbc, - struct buffer_head *bh) +static int io_submit_init_bio(struct ext4_io_submit *io, + struct buffer_head *bh) { - ext4_io_end_t *io_end; - struct page *page = bh->b_page; int nvecs = bio_get_nr_vecs(bh->b_bdev); struct bio *bio; - io_end = ext4_init_io_end(inode, GFP_NOFS); - if (!io_end) - return -ENOMEM; bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES)); + if (!bio) + return -ENOMEM; bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_bdev = bh->b_bdev; - bio->bi_private = io->io_end = io_end; bio->bi_end_io = ext4_end_bio; - - io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh); - + bio->bi_private = ext4_get_io_end(io->io_end); io->io_bio = bio; - io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); io->io_next_block = bh->b_blocknr; return 0; } static int io_submit_add_bh(struct ext4_io_submit *io, struct inode *inode, - struct writeback_control *wbc, struct buffer_head *bh) { - ext4_io_end_t *io_end; int ret; if (io->io_bio && bh->b_blocknr != io->io_next_block) { @@ -349,18 +394,14 @@ submit_and_retry: ext4_io_submit(io); } if (io->io_bio == NULL) { - ret = io_submit_init(io, inode, wbc, bh); + ret = io_submit_init_bio(io, bh); if (ret) return ret; } - io_end = io->io_end; - if (test_clear_buffer_uninit(bh)) - ext4_set_io_unwritten_flag(inode, io_end); - io->io_end->size += bh->b_size; - io->io_next_block++; ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh)); if (ret != bh->b_size) goto submit_and_retry; + io->io_next_block++; return 0; } @@ -432,7 +473,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, do { if (!buffer_async_write(bh)) continue; - ret = io_submit_add_bh(io, inode, wbc, bh); + ret = io_submit_add_bh(io, inode, bh); if (ret) { /* * We only get here on ENOMEM. 
Not much else diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index b27c96d01965..c5adbb318a90 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -79,12 +79,20 @@ static int verify_group_input(struct super_block *sb, ext4_fsblk_t end = start + input->blocks_count; ext4_group_t group = input->group; ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group; - unsigned overhead = ext4_group_overhead_blocks(sb, group); - ext4_fsblk_t metaend = start + overhead; + unsigned overhead; + ext4_fsblk_t metaend; struct buffer_head *bh = NULL; ext4_grpblk_t free_blocks_count, offset; int err = -EINVAL; + if (group != sbi->s_groups_count) { + ext4_warning(sb, "Cannot add at group %u (only %u groups)", + input->group, sbi->s_groups_count); + return -EINVAL; + } + + overhead = ext4_group_overhead_blocks(sb, group); + metaend = start + overhead; input->free_blocks_count = free_blocks_count = input->blocks_count - 2 - overhead - sbi->s_itb_per_group; @@ -96,10 +104,7 @@ static int verify_group_input(struct super_block *sb, free_blocks_count, input->reserved_blocks); ext4_get_group_no_and_offset(sb, start, NULL, &offset); - if (group != sbi->s_groups_count) - ext4_warning(sb, "Cannot add at group %u (only %u groups)", - input->group, sbi->s_groups_count); - else if (offset != 0) + if (offset != 0) ext4_warning(sb, "Last group not full"); else if (input->reserved_blocks > input->blocks_count / 5) ext4_warning(sb, "Reserved blocks too high (%u)", @@ -1551,11 +1556,10 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) int reserved_gdb = ext4_bg_has_super(sb, input->group) ? le16_to_cpu(es->s_reserved_gdt_blocks) : 0; struct inode *inode = NULL; - int gdb_off, gdb_num; + int gdb_off; int err; __u16 bg_flags = 0; - gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb); gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb); if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb, @@ -1656,12 +1660,10 @@ errout: err = err2; if (!err) { - ext4_fsblk_t first_block; - first_block = ext4_group_first_block_no(sb, 0); if (test_opt(sb, DEBUG)) printk(KERN_DEBUG "EXT4-fs: extended group to %llu " "blocks\n", ext4_blocks_count(es)); - update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr - first_block, + update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, (char *)es, sizeof(struct ext4_super_block), 0); } return err; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 94cc84db7c9a..85b3dd60169b 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -69,6 +69,7 @@ static void ext4_mark_recovery_complete(struct super_block *sb, static void ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es); static int ext4_sync_fs(struct super_block *sb, int wait); +static int ext4_sync_fs_nojournal(struct super_block *sb, int wait); static int ext4_remount(struct super_block *sb, int *flags, char *data); static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); static int ext4_unfreeze(struct super_block *sb); @@ -398,6 +399,11 @@ static void ext4_handle_error(struct super_block *sb) } if (test_opt(sb, ERRORS_RO)) { ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); + /* + * Make sure updated value of ->s_mount_flags will be visible + * before ->s_flags update + */ + smp_wmb(); sb->s_flags |= MS_RDONLY; } if (test_opt(sb, ERRORS_PANIC)) @@ -422,9 +428,9 @@ void __ext4_error(struct super_block *sb, const char *function, ext4_handle_error(sb); } -void ext4_error_inode(struct inode *inode, const char *function, - unsigned int line, ext4_fsblk_t block, - const char *fmt, ...) 
+void __ext4_error_inode(struct inode *inode, const char *function, + unsigned int line, ext4_fsblk_t block, + const char *fmt, ...) { va_list args; struct va_format vaf; @@ -451,9 +457,9 @@ void ext4_error_inode(struct inode *inode, const char *function, ext4_handle_error(inode->i_sb); } -void ext4_error_file(struct file *file, const char *function, - unsigned int line, ext4_fsblk_t block, - const char *fmt, ...) +void __ext4_error_file(struct file *file, const char *function, + unsigned int line, ext4_fsblk_t block, + const char *fmt, ...) { va_list args; struct va_format vaf; @@ -570,8 +576,13 @@ void __ext4_abort(struct super_block *sb, const char *function, if ((sb->s_flags & MS_RDONLY) == 0) { ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); - sb->s_flags |= MS_RDONLY; EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED; + /* + * Make sure updated value of ->s_mount_flags will be visible + * before ->s_flags update + */ + smp_wmb(); + sb->s_flags |= MS_RDONLY; if (EXT4_SB(sb)->s_journal) jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO); save_error_info(sb, function, line); @@ -580,7 +591,8 @@ void __ext4_abort(struct super_block *sb, const char *function, panic("EXT4-fs panic from previous error\n"); } -void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...) +void __ext4_msg(struct super_block *sb, + const char *prefix, const char *fmt, ...) { struct va_format vaf; va_list args; @@ -750,8 +762,10 @@ static void ext4_put_super(struct super_block *sb) ext4_unregister_li_request(sb); dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); - flush_workqueue(sbi->dio_unwritten_wq); - destroy_workqueue(sbi->dio_unwritten_wq); + flush_workqueue(sbi->unrsv_conversion_wq); + flush_workqueue(sbi->rsv_conversion_wq); + destroy_workqueue(sbi->unrsv_conversion_wq); + destroy_workqueue(sbi->rsv_conversion_wq); if (sbi->s_journal) { err = jbd2_journal_destroy(sbi->s_journal); @@ -760,7 +774,7 @@ static void ext4_put_super(struct super_block *sb) ext4_abort(sb, "Couldn't clean up the journal"); } - ext4_es_unregister_shrinker(sb); + ext4_es_unregister_shrinker(sbi); del_timer(&sbi->s_err_report); ext4_release_system_zone(sb); ext4_mb_release(sb); @@ -849,6 +863,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) rwlock_init(&ei->i_es_lock); INIT_LIST_HEAD(&ei->i_es_lru); ei->i_es_lru_nr = 0; + ei->i_touch_when = 0; ei->i_reserved_data_blocks = 0; ei->i_reserved_meta_blocks = 0; ei->i_allocated_meta_blocks = 0; @@ -859,13 +874,15 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) ei->i_reserved_quota = 0; #endif ei->jinode = NULL; - INIT_LIST_HEAD(&ei->i_completed_io_list); + INIT_LIST_HEAD(&ei->i_rsv_conversion_list); + INIT_LIST_HEAD(&ei->i_unrsv_conversion_list); spin_lock_init(&ei->i_completed_io_lock); ei->i_sync_tid = 0; ei->i_datasync_tid = 0; atomic_set(&ei->i_ioend_count, 0); atomic_set(&ei->i_unwritten, 0); - INIT_WORK(&ei->i_unwritten_work, ext4_end_io_work); + INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work); + INIT_WORK(&ei->i_unrsv_conversion_work, ext4_end_io_unrsv_work); return &ei->vfs_inode; } @@ -1093,6 +1110,7 @@ static const struct super_operations ext4_nojournal_sops = { .dirty_inode = ext4_dirty_inode, .drop_inode = ext4_drop_inode, .evict_inode = ext4_evict_inode, + .sync_fs = ext4_sync_fs_nojournal, .put_super = ext4_put_super, .statfs = ext4_statfs, .remount_fs = ext4_remount, @@ -1908,7 +1926,6 @@ static int ext4_fill_flex_info(struct super_block *sb) struct ext4_sb_info *sbi = EXT4_SB(sb); 
struct ext4_group_desc *gdp = NULL; ext4_group_t flex_group; - unsigned int groups_per_flex = 0; int i, err; sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex; @@ -1916,7 +1933,6 @@ static int ext4_fill_flex_info(struct super_block *sb) sbi->s_log_groups_per_flex = 0; return 1; } - groups_per_flex = 1U << sbi->s_log_groups_per_flex; err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count); if (err) @@ -2164,19 +2180,22 @@ static void ext4_orphan_cleanup(struct super_block *sb, list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); dquot_initialize(inode); if (inode->i_nlink) { - ext4_msg(sb, KERN_DEBUG, - "%s: truncating inode %lu to %lld bytes", - __func__, inode->i_ino, inode->i_size); + if (test_opt(sb, DEBUG)) + ext4_msg(sb, KERN_DEBUG, + "%s: truncating inode %lu to %lld bytes", + __func__, inode->i_ino, inode->i_size); jbd_debug(2, "truncating inode %lu to %lld bytes\n", inode->i_ino, inode->i_size); mutex_lock(&inode->i_mutex); + truncate_inode_pages(inode->i_mapping, inode->i_size); ext4_truncate(inode); mutex_unlock(&inode->i_mutex); nr_truncates++; } else { - ext4_msg(sb, KERN_DEBUG, - "%s: deleting unreferenced inode %lu", - __func__, inode->i_ino); + if (test_opt(sb, DEBUG)) + ext4_msg(sb, KERN_DEBUG, + "%s: deleting unreferenced inode %lu", + __func__, inode->i_ino); jbd_debug(2, "deleting unreferenced inode %lu\n", inode->i_ino); nr_orphans++; @@ -2377,7 +2396,10 @@ struct ext4_attr { ssize_t (*show)(struct ext4_attr *, struct ext4_sb_info *, char *); ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *, const char *, size_t); - int offset; + union { + int offset; + int deprecated_val; + } u; }; static int parse_strtoull(const char *buf, @@ -2446,7 +2468,7 @@ static ssize_t inode_readahead_blks_store(struct ext4_attr *a, static ssize_t sbi_ui_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) { - unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset); + unsigned int *ui = (unsigned int *) (((char *) sbi) + a->u.offset); return snprintf(buf, PAGE_SIZE, "%u\n", *ui); } @@ -2455,7 +2477,7 @@ static ssize_t sbi_ui_store(struct ext4_attr *a, struct ext4_sb_info *sbi, const char *buf, size_t count) { - unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset); + unsigned int *ui = (unsigned int *) (((char *) sbi) + a->u.offset); unsigned long t; int ret; @@ -2504,12 +2526,20 @@ static ssize_t trigger_test_error(struct ext4_attr *a, return count; } +static ssize_t sbi_deprecated_show(struct ext4_attr *a, + struct ext4_sb_info *sbi, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", a->u.deprecated_val); +} + #define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \ static struct ext4_attr ext4_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ .show = _show, \ .store = _store, \ - .offset = offsetof(struct ext4_sb_info, _elname), \ + .u = { \ + .offset = offsetof(struct ext4_sb_info, _elname),\ + }, \ } #define EXT4_ATTR(name, mode, show, store) \ static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store) @@ -2520,6 +2550,14 @@ static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store) #define EXT4_RW_ATTR_SBI_UI(name, elname) \ EXT4_ATTR_OFFSET(name, 0644, sbi_ui_show, sbi_ui_store, elname) #define ATTR_LIST(name) &ext4_attr_##name.attr +#define EXT4_DEPRECATED_ATTR(_name, _val) \ +static struct ext4_attr ext4_attr_##_name = { \ + .attr = {.name = __stringify(_name), .mode = 0444 }, \ + .show = sbi_deprecated_show, \ + .u = { \ + .deprecated_val = _val, \ + }, \ +} 
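The union introduced in struct ext4_attr above is what lets max_writeback_mb_bump keep its sysfs file after losing its backing field (removed further down): a live attribute resolves u.offset into struct ext4_sb_info, while a deprecated one serves the fixed u.deprecated_val (128 here) from sbi_deprecated_show. A minimal userspace sketch of the idea; the patch dispatches through distinct ->show callbacks (sbi_ui_show vs. sbi_deprecated_show), which this sketch collapses into one flag-checked helper:

	#include <stdio.h>
	#include <stddef.h>

	struct sb_info { unsigned int extent_max_zeroout_kb; };

	struct attr {
		const char *name;
		int deprecated;			/* sketch only: the patch uses separate show hooks */
		union {
			size_t offset;		/* live: offset of the field in sb_info */
			int deprecated_val;	/* deprecated: canned value to report */
		} u;
	};

	static unsigned int attr_show(const struct attr *a, const struct sb_info *sbi)
	{
		if (a->deprecated)
			return a->u.deprecated_val;
		return *(const unsigned int *)((const char *)sbi + a->u.offset);
	}

	int main(void)
	{
		struct sb_info sbi = { .extent_max_zeroout_kb = 32 };
		struct attr live = { "extent_max_zeroout_kb", 0,
			{ .offset = offsetof(struct sb_info, extent_max_zeroout_kb) } };
		struct attr dead = { "max_writeback_mb_bump", 1, { .deprecated_val = 128 } };

		printf("%s = %u\n", live.name, attr_show(&live, &sbi));
		printf("%s = %u\n", dead.name, attr_show(&dead, &sbi)); /* always 128 */
		return 0;
	}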
EXT4_RO_ATTR(delayed_allocation_blocks); EXT4_RO_ATTR(session_write_kbytes); @@ -2534,7 +2572,7 @@ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan); EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs); EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request); EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc); -EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump); +EXT4_DEPRECATED_ATTR(max_writeback_mb_bump, 128); EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb); EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error); @@ -3763,7 +3801,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) sbi->s_err_report.data = (unsigned long) sb; /* Register extent status tree shrinker */ - ext4_es_register_shrinker(sb); + ext4_es_register_shrinker(sbi); err = percpu_counter_init(&sbi->s_freeclusters_counter, ext4_count_free_clusters(sb)); @@ -3787,7 +3825,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } sbi->s_stripe = ext4_get_stripe_size(sbi); - sbi->s_max_writeback_mb_bump = 128; sbi->s_extent_max_zeroout_kb = 32; /* @@ -3915,12 +3952,20 @@ no_journal: * The maximum number of concurrent works can be high and * concurrency isn't really necessary. Limit it to 1. */ - EXT4_SB(sb)->dio_unwritten_wq = - alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); - if (!EXT4_SB(sb)->dio_unwritten_wq) { - printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n"); + EXT4_SB(sb)->rsv_conversion_wq = + alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); + if (!EXT4_SB(sb)->rsv_conversion_wq) { + printk(KERN_ERR "EXT4-fs: failed to create workqueue\n"); ret = -ENOMEM; - goto failed_mount_wq; + goto failed_mount4; + } + + EXT4_SB(sb)->unrsv_conversion_wq = + alloc_workqueue("ext4-unrsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); + if (!EXT4_SB(sb)->unrsv_conversion_wq) { + printk(KERN_ERR "EXT4-fs: failed to create workqueue\n"); + ret = -ENOMEM; + goto failed_mount4; } /* @@ -4074,14 +4119,17 @@ failed_mount4a: sb->s_root = NULL; failed_mount4: ext4_msg(sb, KERN_ERR, "mount failed"); - destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq); + if (EXT4_SB(sb)->rsv_conversion_wq) + destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq); + if (EXT4_SB(sb)->unrsv_conversion_wq) + destroy_workqueue(EXT4_SB(sb)->unrsv_conversion_wq); failed_mount_wq: if (sbi->s_journal) { jbd2_journal_destroy(sbi->s_journal); sbi->s_journal = NULL; } failed_mount3: - ext4_es_unregister_shrinker(sb); + ext4_es_unregister_shrinker(sbi); del_timer(&sbi->s_err_report); if (sbi->s_flex_groups) ext4_kvfree(sbi->s_flex_groups); @@ -4517,19 +4565,52 @@ static int ext4_sync_fs(struct super_block *sb, int wait) { int ret = 0; tid_t target; + bool needs_barrier = false; struct ext4_sb_info *sbi = EXT4_SB(sb); trace_ext4_sync_fs(sb, wait); - flush_workqueue(sbi->dio_unwritten_wq); + flush_workqueue(sbi->rsv_conversion_wq); + flush_workqueue(sbi->unrsv_conversion_wq); /* * Writeback quota in non-journalled quota case - journalled quota has * no dirty dquots */ dquot_writeback_dquots(sb, -1); + /* + * Data writeback is possible w/o journal transaction, so barrier must + * be sent at the end of the function. But we can skip it if + * transaction_commit will do it for us. + */
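That comment is the heart of the change: with no journal transaction required for data writeback, a wait-mode sync must end with a cache flush unless the jbd2 commit it is about to wait for will issue one anyway. Distilled into a predicate (a sketch, assuming <linux/jbd2.h>; the jbd2 names are the real ones used by the code that follows):

	#include <linux/jbd2.h>

	/* Sketch: ext4_sync_fs() must issue its own flush iff the caller
	 * waits, barriers are enabled, and the target commit will not
	 * send a data barrier on our behalf. */
	static bool sync_needs_own_barrier(journal_t *journal, int wait, tid_t target)
	{
		if (!wait)
			return false;	/* no durability promise to keep */
		if (!(journal->j_flags & JBD2_BARRIER))
			return false;	/* barriers disabled (nobarrier mount) */
		return !jbd2_trans_will_send_data_barrier(journal, target);
	}

The new ext4_sync_fs_nojournal() below is the degenerate case: with no journal to piggyback on, a wait-mode sync always ends in blkdev_issue_flush() when barriers are enabled.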
+ target = jbd2_get_latest_transaction(sbi->s_journal); + if (wait && sbi->s_journal->j_flags & JBD2_BARRIER && + !jbd2_trans_will_send_data_barrier(sbi->s_journal, target)) + needs_barrier = true; + if (jbd2_journal_start_commit(sbi->s_journal, &target)) { if (wait) - jbd2_log_wait_commit(sbi->s_journal, target); + ret = jbd2_log_wait_commit(sbi->s_journal, target); + } + if (needs_barrier) { + int err; + err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL); + if (!ret) + ret = err; } + + return ret; +} + +static int ext4_sync_fs_nojournal(struct super_block *sb, int wait) +{ + int ret = 0; + + trace_ext4_sync_fs(sb, wait); + flush_workqueue(EXT4_SB(sb)->rsv_conversion_wq); + flush_workqueue(EXT4_SB(sb)->unrsv_conversion_wq); + dquot_writeback_dquots(sb, -1); + if (wait && test_opt(sb, BARRIER)) + ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL); + return ret; } diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig index fd27e7e6326e..e06e0995e00f 100644 --- a/fs/f2fs/Kconfig +++ b/fs/f2fs/Kconfig @@ -51,3 +51,15 @@ config F2FS_FS_POSIX_ACL Linux website <http://acl.bestbits.at/>. If you don't know what Access Control Lists are, say N + +config F2FS_FS_SECURITY + bool "F2FS Security Labels" + depends on F2FS_FS_XATTR + help + Security labels provide an access control facility to support Linux + Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO + Linux. This option enables an extended attribute handler for file + security labels in the f2fs filesystem, so that it requires enabling + the extended attribute support in advance. + + If you are not using a security module, say N. diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index 44abc2f286e0..b7826ec1b470 100644 --- a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c @@ -250,7 +250,7 @@ static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl) } } - error = f2fs_setxattr(inode, name_index, "", value, size); + error = f2fs_setxattr(inode, name_index, "", value, size, NULL); kfree(value); if (!error) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index b1de01da1a40..66a6b85a51d8 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -357,8 +357,8 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, unsigned long blk_size = sbi->blocksize; struct f2fs_checkpoint *cp_block; unsigned long long cur_version = 0, pre_version = 0; - unsigned int crc = 0; size_t crc_offset; + __u32 crc = 0; /* Read the 1st cp block in this CP pack */ cp_page_1 = get_meta_page(sbi, cp_addr); @@ -369,7 +369,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, if (crc_offset >= blk_size) goto invalid_cp1; - crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset); + crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset))); if (!f2fs_crc_valid(crc, cp_block, crc_offset)) goto invalid_cp1; @@ -384,7 +384,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, if (crc_offset >= blk_size) goto invalid_cp2; - crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset))); if (!f2fs_crc_valid(crc, cp_block, crc_offset)) goto invalid_cp2; @@ -450,13 +450,30 @@ fail_no_cp: return -EINVAL; } -void set_dirty_dir_page(struct inode *inode, struct page *page) +static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new) { struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); struct list_head *head = &sbi->dir_inode_list; - struct dir_inode_entry *new; struct list_head *this; +
list_for_each(this, head) { + struct dir_inode_entry *entry; + entry = list_entry(this, struct dir_inode_entry, list); + if (entry->inode == inode) + return -EEXIST; + } + list_add_tail(&new->list, head); +#ifdef CONFIG_F2FS_STAT_FS + sbi->n_dirty_dirs++; +#endif + return 0; +} + +void set_dirty_dir_page(struct inode *inode, struct page *page) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct dir_inode_entry *new; + if (!S_ISDIR(inode->i_mode)) return; retry: @@ -469,23 +486,31 @@ retry: INIT_LIST_HEAD(&new->list); spin_lock(&sbi->dir_inode_lock); - list_for_each(this, head) { - struct dir_inode_entry *entry; - entry = list_entry(this, struct dir_inode_entry, list); - if (entry->inode == inode) { - kmem_cache_free(inode_entry_slab, new); - goto out; - } - } - list_add_tail(&new->list, head); - sbi->n_dirty_dirs++; + if (__add_dirty_inode(inode, new)) + kmem_cache_free(inode_entry_slab, new); - BUG_ON(!S_ISDIR(inode->i_mode)); -out: inc_page_count(sbi, F2FS_DIRTY_DENTS); inode_inc_dirty_dents(inode); SetPagePrivate(page); + spin_unlock(&sbi->dir_inode_lock); +} +void add_dirty_dir_inode(struct inode *inode) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct dir_inode_entry *new; +retry: + new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS); + if (!new) { + cond_resched(); + goto retry; + } + new->inode = inode; + INIT_LIST_HEAD(&new->list); + + spin_lock(&sbi->dir_inode_lock); + if (__add_dirty_inode(inode, new)) + kmem_cache_free(inode_entry_slab, new); spin_unlock(&sbi->dir_inode_lock); } @@ -499,8 +524,10 @@ void remove_dirty_dir_inode(struct inode *inode) return; spin_lock(&sbi->dir_inode_lock); - if (atomic_read(&F2FS_I(inode)->dirty_dents)) - goto out; + if (atomic_read(&F2FS_I(inode)->dirty_dents)) { + spin_unlock(&sbi->dir_inode_lock); + return; + } list_for_each(this, head) { struct dir_inode_entry *entry; @@ -508,12 +535,38 @@ void remove_dirty_dir_inode(struct inode *inode) if (entry->inode == inode) { list_del(&entry->list); kmem_cache_free(inode_entry_slab, entry); +#ifdef CONFIG_F2FS_STAT_FS sbi->n_dirty_dirs--; +#endif + break; + } + } + spin_unlock(&sbi->dir_inode_lock); + + /* Only from the recovery routine */ + if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) { + clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT); + iput(inode); + } +} + +struct inode *check_dirty_dir_inode(struct f2fs_sb_info *sbi, nid_t ino) +{ + struct list_head *head = &sbi->dir_inode_list; + struct list_head *this; + struct inode *inode = NULL; + + spin_lock(&sbi->dir_inode_lock); + list_for_each(this, head) { + struct dir_inode_entry *entry; + entry = list_entry(this, struct dir_inode_entry, list); + if (entry->inode->i_ino == ino) { + inode = entry->inode; break; } } -out: spin_unlock(&sbi->dir_inode_lock); + return inode; } void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi) @@ -595,7 +648,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) block_t start_blk; struct page *cp_page; unsigned int data_sum_blocks, orphan_blocks; - unsigned int crc32 = 0; + __u32 crc32 = 0; void *kaddr; int i; @@ -664,8 +717,8 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP)); crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset)); - *(__le32 *)((unsigned char *)ckpt + - le32_to_cpu(ckpt->checksum_offset)) + *((__le32 *)((unsigned char *)ckpt + + le32_to_cpu(ckpt->checksum_offset))) = cpu_to_le32(crc32); start_blk = __start_cp_addr(sbi);
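Worth pausing on the shape of set_dirty_dir_page() and add_dirty_dir_inode() above: the list entry is allocated with GFP_NOFS outside the spinlock (the allocation may sleep), and the duplicate scan plus insertion happen atomically inside __add_dirty_inode(), with the loser of a race simply freeing its entry. The idiom in isolation (a sketch; the struct and helper names here are illustrative, not from the patch):

	#include <linux/slab.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/sched.h>

	struct tracked {
		struct inode *inode;
		struct list_head list;
	};

	/* Returns -EEXIST without touching 'new' if inode is already listed. */
	static int __track_insert(struct list_head *head, struct tracked *new)
	{
		struct tracked *cur;

		list_for_each_entry(cur, head, list)
			if (cur->inode == new->inode)
				return -EEXIST;
		list_add_tail(&new->list, head);
		return 0;
	}

	static void track_inode(struct inode *inode, struct list_head *head,
				spinlock_t *lock, struct kmem_cache *cache)
	{
		struct tracked *new;
	retry:
		new = kmem_cache_alloc(cache, GFP_NOFS);	/* may sleep: no lock held */
		if (!new) {
			cond_resched();
			goto retry;
		}
		new->inode = inode;
		INIT_LIST_HEAD(&new->list);

		spin_lock(lock);
		if (__track_insert(head, new))		/* lost the race to another CPU */
			kmem_cache_free(cache, new);	/* freeing under a spinlock is fine */
		spin_unlock(lock);
	}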
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 91ff93b0b0f4..035f9a345cdf 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -68,7 +68,9 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs, struct buffer_head *bh_result) { struct f2fs_inode_info *fi = F2FS_I(inode); +#ifdef CONFIG_F2FS_STAT_FS struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); +#endif pgoff_t start_fofs, end_fofs; block_t start_blkaddr; @@ -78,7 +80,9 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs, return 0; } +#ifdef CONFIG_F2FS_STAT_FS sbi->total_hit_ext++; +#endif start_fofs = fi->ext.fofs; end_fofs = fi->ext.fofs + fi->ext.len - 1; start_blkaddr = fi->ext.blk_addr; @@ -96,7 +100,9 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs, else bh_result->b_size = UINT_MAX; +#ifdef CONFIG_F2FS_STAT_FS sbi->read_hit_ext++; +#endif read_unlock(&fi->ext.ext_lock); return 1; } @@ -199,7 +205,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync) if (dn.data_blkaddr == NEW_ADDR) return ERR_PTR(-EINVAL); - page = grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); if (!page) return ERR_PTR(-ENOMEM); @@ -233,18 +239,23 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index) struct page *page; int err; +repeat: + page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); + if (!page) + return ERR_PTR(-ENOMEM); + set_new_dnode(&dn, inode, NULL, NULL, 0); err = get_dnode_of_data(&dn, index, LOOKUP_NODE); - if (err) + if (err) { + f2fs_put_page(page, 1); return ERR_PTR(err); + } f2fs_put_dnode(&dn); - if (dn.data_blkaddr == NULL_ADDR) + if (dn.data_blkaddr == NULL_ADDR) { + f2fs_put_page(page, 1); return ERR_PTR(-ENOENT); -repeat: - page = grab_cache_page(mapping, index); - if (!page) - return ERR_PTR(-ENOMEM); + } if (PageUptodate(page)) return page; @@ -274,9 +285,10 @@ repeat: * * Also, caller should grab and release a mutex by calling mutex_lock_op() and * mutex_unlock_op(). + * Note that, npage is set only by make_empty_dir. */ -struct page *get_new_data_page(struct inode *inode, pgoff_t index, - bool new_i_size) +struct page *get_new_data_page(struct inode *inode, + struct page *npage, pgoff_t index, bool new_i_size) { struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); struct address_space *mapping = inode->i_mapping; @@ -284,18 +296,20 @@ struct page *get_new_data_page(struct inode *inode, pgoff_t index, struct dnode_of_data dn; int err; - set_new_dnode(&dn, inode, NULL, NULL, 0); + set_new_dnode(&dn, inode, npage, npage, 0); err = get_dnode_of_data(&dn, index, ALLOC_NODE); if (err) return ERR_PTR(err); if (dn.data_blkaddr == NULL_ADDR) { if (reserve_new_block(&dn)) { - f2fs_put_dnode(&dn); + if (!npage) + f2fs_put_dnode(&dn); return ERR_PTR(-ENOSPC); } } - f2fs_put_dnode(&dn); + if (!npage) + f2fs_put_dnode(&dn); repeat: page = grab_cache_page(mapping, index); if (!page) @@ -325,6 +339,8 @@ repeat: if (new_i_size && i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) { i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT)); + /* Only the directory inode sets new_i_size */ + set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR); mark_inode_dirty_sync(inode); } return page; @@ -481,8 +497,9 @@ int do_write_data_page(struct page *page) * If current allocation needs SSR, * it had better in-place writes for updated data. */
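That comment compresses the trade-off: under SSR (free segments are scarce), overwriting the old block avoids consuming fresh log space, while data that was never allocated (NEW_ADDR) or is classified cold still takes the normal out-of-place path. The condition the hunk below wraps in unlikely(), written out as a predicate (a sketch assuming f2fs' internal headers; want_inplace_write() itself is not a real f2fs function):

	/* Sketch, assuming f2fs.h and segment.h for block_t, NEW_ADDR,
	 * is_cold_data() and need_inplace_update(). */
	static bool want_inplace_write(struct inode *inode, struct page *page,
				       block_t old_blk_addr)
	{
		if (old_blk_addr == NEW_ADDR)
			return false;		/* no on-disk copy to overwrite */
		if (is_cold_data(page))
			return false;		/* cold data keeps its LFS placement */
		return need_inplace_update(inode);	/* true under SSR pressure */
	}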
- if (old_blk_addr != NEW_ADDR && !is_cold_data(page) && - need_inplace_update(inode)) { + if (unlikely(old_blk_addr != NEW_ADDR && + !is_cold_data(page) && + need_inplace_update(inode))) { rewrite_data_page(F2FS_SB(inode->i_sb), page, old_blk_addr); } else { @@ -684,6 +701,27 @@ err: return err; } +static int f2fs_write_end(struct file *file, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) +{ + struct inode *inode = page->mapping->host; + + SetPageUptodate(page); + set_page_dirty(page); + + if (pos + copied > i_size_read(inode)) { + i_size_write(inode, pos + copied); + mark_inode_dirty(inode); + update_inode_page(inode); + } + + unlock_page(page); + page_cache_release(page); + return copied; +} + static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { @@ -698,7 +736,8 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb, get_data_block_ro); } -static void f2fs_invalidate_data_page(struct page *page, unsigned long offset) +static void f2fs_invalidate_data_page(struct page *page, unsigned int offset, + unsigned int length) { struct inode *inode = page->mapping->host; struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); @@ -740,7 +779,7 @@ const struct address_space_operations f2fs_dblock_aops = { .writepage = f2fs_write_data_page, .writepages = f2fs_write_data_pages, .write_begin = f2fs_write_begin, - .write_end = nobh_write_end, + .write_end = f2fs_write_end, .set_page_dirty = f2fs_set_data_page_dirty, .invalidatepage = f2fs_invalidate_data_page, .releasepage = f2fs_release_data_page, diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 8d9943786c31..0d6c6aafb235 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -175,12 +175,12 @@ get_cache: static int stat_show(struct seq_file *s, void *v) { - struct f2fs_stat_info *si, *next; + struct f2fs_stat_info *si; int i = 0; int j; mutex_lock(&f2fs_stat_mutex); - list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) { + list_for_each_entry(si, &f2fs_stat_list, stat_list) { char devname[BDEVNAME_SIZE]; update_general_status(si->sbi); diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 1ac6b93036b7..9d1cd423450d 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -13,6 +13,7 @@ #include "f2fs.h" #include "node.h" #include "acl.h" +#include "xattr.h" static unsigned long dir_blocks(struct inode *inode) { @@ -215,9 +216,9 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p) { - struct page *page = NULL; - struct f2fs_dir_entry *de = NULL; - struct f2fs_dentry_block *dentry_blk = NULL; + struct page *page; + struct f2fs_dir_entry *de; + struct f2fs_dentry_block *dentry_blk; page = get_lock_data_page(dir, 0); if (IS_ERR(page)) @@ -264,15 +265,10 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, f2fs_put_page(page, 1); } -void init_dent_inode(const struct qstr *name, struct page *ipage) +static void init_dent_inode(const struct qstr *name, struct page *ipage) { struct f2fs_node *rn; - if (IS_ERR(ipage)) - return; - - wait_on_page_writeback(ipage); - /* copy name info.
to this inode page */ rn = (struct f2fs_node *)page_address(ipage); rn->i.i_namelen = cpu_to_le32(name->len); @@ -280,14 +276,15 @@ void init_dent_inode(const struct qstr *name, struct page *ipage) set_page_dirty(ipage); } -static int make_empty_dir(struct inode *inode, struct inode *parent) +static int make_empty_dir(struct inode *inode, + struct inode *parent, struct page *page) { struct page *dentry_page; struct f2fs_dentry_block *dentry_blk; struct f2fs_dir_entry *de; void *kaddr; - dentry_page = get_new_data_page(inode, 0, true); + dentry_page = get_new_data_page(inode, page, 0, true); if (IS_ERR(dentry_page)) return PTR_ERR(dentry_page); @@ -317,63 +314,76 @@ static int make_empty_dir(struct inode *inode, struct inode *parent) return 0; } -static int init_inode_metadata(struct inode *inode, +static struct page *init_inode_metadata(struct inode *inode, struct inode *dir, const struct qstr *name) { + struct page *page; + int err; + if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) { - int err; - err = new_inode_page(inode, name); - if (err) - return err; + page = new_inode_page(inode, name); + if (IS_ERR(page)) + return page; if (S_ISDIR(inode->i_mode)) { - err = make_empty_dir(inode, dir); - if (err) { - remove_inode_page(inode); - return err; - } + err = make_empty_dir(inode, dir, page); + if (err) + goto error; } err = f2fs_init_acl(inode, dir); - if (err) { - remove_inode_page(inode); - return err; - } + if (err) + goto error; + + err = f2fs_init_security(inode, dir, name, page); + if (err) + goto error; + + wait_on_page_writeback(page); } else { - struct page *ipage; - ipage = get_node_page(F2FS_SB(dir->i_sb), inode->i_ino); - if (IS_ERR(ipage)) - return PTR_ERR(ipage); - set_cold_node(inode, ipage); - init_dent_inode(name, ipage); - f2fs_put_page(ipage, 1); + page = get_node_page(F2FS_SB(dir->i_sb), inode->i_ino); + if (IS_ERR(page)) + return page; + + wait_on_page_writeback(page); + set_cold_node(inode, page); } + + init_dent_inode(name, page); + + /* + * This file should be checkpointed during fsync. + * We lost i_pino from now on. 
+ */ if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) { + file_lost_pino(inode); inc_nlink(inode); - update_inode_page(inode); } - return 0; + return page; + +error: + f2fs_put_page(page, 1); + remove_inode_page(inode); + return ERR_PTR(err); } static void update_parent_metadata(struct inode *dir, struct inode *inode, unsigned int current_depth) { - bool need_dir_update = false; - if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) { if (S_ISDIR(inode->i_mode)) { inc_nlink(dir); - need_dir_update = true; + set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); } clear_inode_flag(F2FS_I(inode), FI_NEW_INODE); } dir->i_mtime = dir->i_ctime = CURRENT_TIME; if (F2FS_I(dir)->i_current_depth != current_depth) { F2FS_I(dir)->i_current_depth = current_depth; - need_dir_update = true; + set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); } - if (need_dir_update) + if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) update_inode_page(dir); else mark_inode_dirty(dir); @@ -423,6 +433,7 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *in struct page *dentry_page = NULL; struct f2fs_dentry_block *dentry_blk = NULL; int slots = GET_DENTRY_SLOTS(namelen); + struct page *page; int err = 0; int i; @@ -448,7 +459,7 @@ start: bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket)); for (block = bidx; block <= (bidx + nblock - 1); block++) { - dentry_page = get_new_data_page(dir, block, true); + dentry_page = get_new_data_page(dir, NULL, block, true); if (IS_ERR(dentry_page)) return PTR_ERR(dentry_page); @@ -465,12 +476,13 @@ start: ++level; goto start; add_dentry: - err = init_inode_metadata(inode, dir, name); - if (err) - goto fail; - wait_on_page_writeback(dentry_page); + page = init_inode_metadata(inode, dir, name); + if (IS_ERR(page)) { + err = PTR_ERR(page); + goto fail; + } de = &dentry_blk->dentry[bit_pos]; de->hash_code = dentry_hash; de->name_len = cpu_to_le16(namelen); @@ -481,11 +493,14 @@ add_dentry: test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); set_page_dirty(dentry_page); - update_parent_metadata(dir, inode, current_depth); - - /* update parent inode number before releasing dentry page */ + /* we don't need to mark_inode_dirty now */ F2FS_I(inode)->i_pino = dir->i_ino; + update_inode(inode, page); + f2fs_put_page(page, 1); + + update_parent_metadata(dir, inode, current_depth); fail: + clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); kunmap(dentry_page); f2fs_put_page(dentry_page, 1); return err; @@ -591,24 +606,19 @@ bool f2fs_empty_dir(struct inode *dir) return true; } -static int f2fs_readdir(struct file *file, void *dirent, filldir_t filldir) +static int f2fs_readdir(struct file *file, struct dir_context *ctx) { - unsigned long pos = file->f_pos; struct inode *inode = file_inode(file); unsigned long npages = dir_blocks(inode); - unsigned char *types = NULL; unsigned int bit_pos = 0, start_bit_pos = 0; - int over = 0; struct f2fs_dentry_block *dentry_blk = NULL; struct f2fs_dir_entry *de = NULL; struct page *dentry_page = NULL; - unsigned int n = 0; + unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK); unsigned char d_type = DT_UNKNOWN; int slots; - types = f2fs_filetype_table; - bit_pos = (pos % NR_DENTRY_IN_BLOCK); - n = (pos / NR_DENTRY_IN_BLOCK); + bit_pos = ((unsigned long)ctx->pos % NR_DENTRY_IN_BLOCK); for ( ; n < npages; n++) { dentry_page = get_lock_data_page(inode, n); @@ -618,31 +628,28 @@ static int f2fs_readdir(struct file *file, void *dirent, filldir_t filldir) start_bit_pos = bit_pos; dentry_blk = kmap(dentry_page); while 
(bit_pos < NR_DENTRY_IN_BLOCK) { - d_type = DT_UNKNOWN; bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap, NR_DENTRY_IN_BLOCK, bit_pos); if (bit_pos >= NR_DENTRY_IN_BLOCK) break; + ctx->pos += bit_pos - start_bit_pos; de = &dentry_blk->dentry[bit_pos]; - if (types && de->file_type < F2FS_FT_MAX) - d_type = types[de->file_type]; - - over = filldir(dirent, - dentry_blk->filename[bit_pos], - le16_to_cpu(de->name_len), - (n * NR_DENTRY_IN_BLOCK) + bit_pos, - le32_to_cpu(de->ino), d_type); - if (over) { - file->f_pos += bit_pos - start_bit_pos; + if (de->file_type < F2FS_FT_MAX) + d_type = f2fs_filetype_table[de->file_type]; + else + d_type = DT_UNKNOWN; + if (!dir_emit(ctx, + dentry_blk->filename[bit_pos], + le16_to_cpu(de->name_len), + le32_to_cpu(de->ino), d_type)) goto success; - } slots = GET_DENTRY_SLOTS(le16_to_cpu(de->name_len)); bit_pos += slots; } bit_pos = 0; - file->f_pos = (n + 1) * NR_DENTRY_IN_BLOCK; + ctx->pos = (n + 1) * NR_DENTRY_IN_BLOCK; kunmap(dentry_page); f2fs_put_page(dentry_page, 1); dentry_page = NULL; @@ -659,7 +666,7 @@ success: const struct file_operations f2fs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = f2fs_readdir, + .iterate = f2fs_readdir, .fsync = f2fs_sync_file, .unlocked_ioctl = f2fs_ioctl, }; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 20aab02f2a42..467d42d65c48 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -37,21 +37,35 @@ typecheck(unsigned long long, b) && \ ((long long)((a) - (b)) > 0)) -typedef u64 block_t; +typedef u32 block_t; /* + * should not change u32, since it is the on-disk block + * address format, __le32. + */ typedef u32 nid_t; struct f2fs_mount_info { unsigned int opt; }; -static inline __u32 f2fs_crc32(void *buff, size_t len) +#define CRCPOLY_LE 0xedb88320 + +static inline __u32 f2fs_crc32(void *buf, size_t len) { - return crc32_le(F2FS_SUPER_MAGIC, buff, len); + unsigned char *p = (unsigned char *)buf; + __u32 crc = F2FS_SUPER_MAGIC; + int i; + + while (len--) { + crc ^= *p++; + for (i = 0; i < 8; i++) + crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); + } + return crc; } -static inline bool f2fs_crc_valid(__u32 blk_crc, void *buff, size_t buff_size) +static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size) { - return f2fs_crc32(buff, buff_size) == blk_crc; + return f2fs_crc32(buf, buf_size) == blk_crc; } /* @@ -148,7 +162,7 @@ struct extent_info { * i_advise uses FADVISE_XXX_BIT. We can add additional hints later. */ #define FADVISE_COLD_BIT 0x01 -#define FADVISE_CP_BIT 0x02 +#define FADVISE_LOST_PINO_BIT 0x02 struct f2fs_inode_info { struct inode vfs_inode; /* serve a vfs inode */ @@ -369,7 +383,6 @@ struct f2fs_sb_info { /* for directory inode management */ struct list_head dir_inode_list; /* dir inode list */ spinlock_t dir_inode_lock; /* for dir inode list lock */ - unsigned int n_dirty_dirs; /* # of dir inodes */ /* basic file system units */ unsigned int log_sectors_per_block; /* log2 sectors per block */ @@ -406,12 +419,15 @@ struct f2fs_sb_info { * for stat information. * one is for the LFS mode, and the other is for the SSR mode. */
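The bare #ifdef CONFIG_F2FS_STAT_FS blocks this patch scatters around counter bumps (check_extent_cache() earlier, gc_thread_func() later) and around the fields below do the job, but the conventional cleanup is a macro that compiles away when stats are off, so call sites stay single lines. A sketch of that idiom (illustrative names; later f2fs releases adopted macros of roughly this shape):

	#ifdef CONFIG_F2FS_STAT_FS
	#define stat_inc_total_hit(sbi)		((sbi)->total_hit_ext++)
	#define stat_inc_read_hit(sbi)		((sbi)->read_hit_ext++)
	#else
	#define stat_inc_total_hit(sbi)		do { } while (0)
	#define stat_inc_read_hit(sbi)		do { } while (0)
	#endif

	/* check_extent_cache() would then shrink to:
	 *	stat_inc_total_hit(sbi);
	 * with no conditional compilation at the call site. */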
+#ifdef CONFIG_F2FS_STAT_FS struct f2fs_stat_info *stat_info; /* FS status information */ unsigned int segment_count[2]; /* # of allocated segments */ unsigned int block_count[2]; /* # of allocated blocks */ - unsigned int last_victim[2]; /* last victim segment # */ int total_hit_ext, read_hit_ext; /* extent cache hit ratio */ int bg_gc; /* background gc calls */ + unsigned int n_dirty_dirs; /* # of dir inodes */ +#endif + unsigned int last_victim[2]; /* last victim segment # */ spinlock_t stat_lock; /* lock for stat operations */ }; @@ -495,9 +511,17 @@ static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) static inline void mutex_lock_all(struct f2fs_sb_info *sbi) { - int i = 0; - for (; i < NR_GLOBAL_LOCKS; i++) - mutex_lock(&sbi->fs_lock[i]); + int i; + + for (i = 0; i < NR_GLOBAL_LOCKS; i++) { + /* + * This is the only time we take multiple fs_lock[] + * instances; the order is immaterial since we + * always hold cp_mutex, which serializes multiple + * such operations. + */ + mutex_lock_nest_lock(&sbi->fs_lock[i], &sbi->cp_mutex); + } } static inline void mutex_unlock_all(struct f2fs_sb_info *sbi) @@ -843,9 +867,12 @@ static inline int f2fs_clear_bit(unsigned int nr, char *addr) /* used for f2fs_inode_info->flags */ enum { FI_NEW_INODE, /* indicate newly allocated inode */ + FI_DIRTY_INODE, /* indicate inode is dirty or not */ FI_INC_LINK, /* need to increment i_nlink */ FI_ACL_MODE, /* indicate acl mode */ FI_NO_ALLOC, /* should not allocate any blocks */ + FI_UPDATE_DIR, /* should update inode block for consistency */ + FI_DELAY_IPUT, /* used for the recovery */ }; static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag) @@ -878,14 +905,21 @@ static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag) return 0; } +static inline int f2fs_readonly(struct super_block *sb) +{ + return sb->s_flags & MS_RDONLY; +} + /* * file.c */ int f2fs_sync_file(struct file *, loff_t, loff_t, int); void truncate_data_blocks(struct dnode_of_data *); void f2fs_truncate(struct inode *); +int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *); int f2fs_setattr(struct dentry *, struct iattr *); int truncate_hole(struct inode *, pgoff_t, pgoff_t); +int truncate_data_blocks_range(struct dnode_of_data *, int); long f2fs_ioctl(struct file *, unsigned int, unsigned long); long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long); @@ -913,7 +947,6 @@ struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **); ino_t f2fs_inode_by_name(struct inode *, struct qstr *); void f2fs_set_link(struct inode *, struct f2fs_dir_entry *, struct page *, struct inode *); -void init_dent_inode(const struct qstr *, struct page *); int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *); void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *); int f2fs_make_empty(struct inode *, struct inode *); @@ -948,8 +981,8 @@ void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *); int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int); int truncate_inode_blocks(struct inode *, pgoff_t); int remove_inode_page(struct inode *); -int new_inode_page(struct inode *, const struct qstr *); -struct page *new_node_page(struct dnode_of_data *, unsigned int); +struct page *new_inode_page(struct inode *, const struct qstr *); +struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *); void ra_node_page(struct f2fs_sb_info *, nid_t); struct page *get_node_page(struct f2fs_sb_info
*, pgoff_t); struct page *get_node_page_ra(struct page *, int); @@ -974,7 +1007,6 @@ void destroy_node_manager_caches(void); */ void f2fs_balance_fs(struct f2fs_sb_info *); void invalidate_blocks(struct f2fs_sb_info *, block_t); -void locate_dirty_segment(struct f2fs_sb_info *, unsigned int); void clear_prefree_segments(struct f2fs_sb_info *); int npages_for_summary_flush(struct f2fs_sb_info *); void allocate_new_segments(struct f2fs_sb_info *); @@ -1011,7 +1043,9 @@ void remove_orphan_inode(struct f2fs_sb_info *, nid_t); int recover_orphan_inodes(struct f2fs_sb_info *); int get_valid_checkpoint(struct f2fs_sb_info *); void set_dirty_dir_page(struct inode *, struct page *); +void add_dirty_dir_inode(struct inode *); void remove_dirty_dir_inode(struct inode *); +struct inode *check_dirty_dir_inode(struct f2fs_sb_info *, nid_t); void sync_dirty_dir_inodes(struct f2fs_sb_info *); void write_checkpoint(struct f2fs_sb_info *, bool); void init_orphan_info(struct f2fs_sb_info *); @@ -1025,7 +1059,7 @@ int reserve_new_block(struct dnode_of_data *); void update_extent_cache(block_t, struct dnode_of_data *); struct page *find_data_page(struct inode *, pgoff_t, bool); struct page *get_lock_data_page(struct inode *, pgoff_t); -struct page *get_new_data_page(struct inode *, pgoff_t, bool); +struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool); int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int); int do_write_data_page(struct page *); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 1cae864f8dfc..d2d2b7dbdcc1 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -63,9 +63,10 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, f2fs_put_dnode(&dn); mutex_unlock_op(sbi, ilock); + file_update_time(vma->vm_file); lock_page(page); if (page->mapping != inode->i_mapping || - page_offset(page) >= i_size_read(inode) || + page_offset(page) > i_size_read(inode) || !PageUptodate(page)) { unlock_page(page); err = -EFAULT; @@ -76,10 +77,7 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, * check to see if the page is mapped already (no holes) */ if (PageMappedToDisk(page)) - goto out; - - /* fill the page */ - wait_on_page_writeback(page); + goto mapped; /* page is wholly or partially inside EOF */ if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) { @@ -90,7 +88,9 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, set_page_dirty(page); SetPageUptodate(page); - file_update_time(vma->vm_file); +mapped: + /* fill the page */ + wait_on_page_writeback(page); out: sb_end_pagefault(inode->i_sb); return block_page_mkwrite_return(err); @@ -102,6 +102,24 @@ static const struct vm_operations_struct f2fs_file_vm_ops = { .remap_pages = generic_file_remap_pages, }; +static int get_parent_ino(struct inode *inode, nid_t *pino) +{ + struct dentry *dentry; + + inode = igrab(inode); + dentry = d_find_any_alias(inode); + iput(inode); + if (!dentry) + return 0; + + inode = igrab(dentry->d_parent->d_inode); + dput(dentry); + + *pino = inode->i_ino; + iput(inode); + return 1; +} + int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; @@ -114,7 +132,7 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) .for_reclaim = 0, }; - if (inode->i_sb->s_flags & MS_RDONLY) + if (f2fs_readonly(inode->i_sb)) return 0; trace_f2fs_sync_file_enter(inode); @@ -134,7 +152,7 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) if 
(!S_ISREG(inode->i_mode) || inode->i_nlink != 1) need_cp = true; - else if (is_cp_file(inode)) + else if (file_wrong_pino(inode)) need_cp = true; else if (!space_for_roll_forward(sbi)) need_cp = true; @@ -142,11 +160,23 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) need_cp = true; if (need_cp) { + nid_t pino; + /* all the dirty node pages should be flushed for POR */ ret = f2fs_sync_fs(inode->i_sb, 1); + if (file_wrong_pino(inode) && inode->i_nlink == 1 && + get_parent_ino(inode, &pino)) { + F2FS_I(inode)->i_pino = pino; + file_got_pino(inode); + mark_inode_dirty_sync(inode); + ret = f2fs_write_inode(inode, NULL); + if (ret) + goto out; + } } else { /* if there is no written node page, write its inode page */ while (!sync_node_pages(sbi, inode->i_ino, &wbc)) { + mark_inode_dirty_sync(inode); ret = f2fs_write_inode(inode, NULL); if (ret) goto out; @@ -168,7 +198,7 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) return 0; } -static int truncate_data_blocks_range(struct dnode_of_data *dn, int count) +int truncate_data_blocks_range(struct dnode_of_data *dn, int count) { int nr_free = 0, ofs = dn->ofs_in_node; struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); @@ -185,10 +215,10 @@ static int truncate_data_blocks_range(struct dnode_of_data *dn, int count) update_extent_cache(NULL_ADDR, dn); invalidate_blocks(sbi, blkaddr); - dec_valid_block_count(sbi, dn->inode, 1); nr_free++; } if (nr_free) { + dec_valid_block_count(sbi, dn->inode, nr_free); set_page_dirty(dn->node_page); sync_inode_page(dn); } @@ -291,7 +321,7 @@ void f2fs_truncate(struct inode *inode) } } -static int f2fs_getattr(struct vfsmount *mnt, +int f2fs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; @@ -387,7 +417,7 @@ static void fill_zero(struct inode *inode, pgoff_t index, f2fs_balance_fs(sbi); ilock = mutex_lock_op(sbi); - page = get_new_data_page(inode, index, false); + page = get_new_data_page(inode, NULL, index, false); mutex_unlock_op(sbi, ilock); if (!IS_ERR(page)) { @@ -575,10 +605,10 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) int ret; switch (cmd) { - case FS_IOC_GETFLAGS: + case F2FS_IOC_GETFLAGS: flags = fi->i_flags & FS_FL_USER_VISIBLE; return put_user(flags, (int __user *) arg); - case FS_IOC_SETFLAGS: + case F2FS_IOC_SETFLAGS: { unsigned int oldflags; diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 14961593e93c..35f9b1a196aa 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -76,7 +76,9 @@ static int gc_thread_func(void *data) else wait_ms = increase_sleep_time(wait_ms); +#ifdef CONFIG_F2FS_STAT_FS sbi->bg_gc++; +#endif /* if return value is not zero, no victim was selected */ if (f2fs_gc(sbi)) @@ -89,23 +91,28 @@ int start_gc_thread(struct f2fs_sb_info *sbi) { struct f2fs_gc_kthread *gc_th; dev_t dev = sbi->sb->s_bdev->bd_dev; + int err = 0; if (!test_opt(sbi, BG_GC)) - return 0; + goto out; gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL); - if (!gc_th) - return -ENOMEM; + if (!gc_th) { + err = -ENOMEM; + goto out; + } sbi->gc_thread = gc_th; init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head); sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi, "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev)); if (IS_ERR(gc_th->f2fs_gc_task)) { + err = PTR_ERR(gc_th->f2fs_gc_task); kfree(gc_th); sbi->gc_thread = NULL; - return -ENOMEM; } - return 0; + +out: + return err; } void stop_gc_thread(struct f2fs_sb_info *sbi) @@ -234,14 +241,14 @@ static int 
get_victim_by_default(struct f2fs_sb_info *sbi, { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct victim_sel_policy p; - unsigned int secno; + unsigned int secno, max_cost; int nsearched = 0; p.alloc_mode = alloc_mode; select_policy(sbi, gc_type, type, &p); p.min_segno = NULL_SEGNO; - p.min_cost = get_max_cost(sbi, &p); + p.min_cost = max_cost = get_max_cost(sbi, &p); mutex_lock(&dirty_i->seglist_lock); @@ -280,7 +287,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, p.min_cost = cost; } - if (cost == get_max_cost(sbi, &p)) + if (cost == max_cost) continue; if (nsearched++ >= MAX_VICTIM_SEARCH) { @@ -288,8 +295,8 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, break; } } -got_it: if (p.min_segno != NULL_SEGNO) { +got_it: if (p.alloc_mode == LFS) { secno = GET_SECNO(sbi, p.min_segno); if (gc_type == FG_GC) @@ -314,28 +321,21 @@ static const struct victim_selection default_v_ops = { static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist) { - struct list_head *this; struct inode_entry *ie; - list_for_each(this, ilist) { - ie = list_entry(this, struct inode_entry, list); + list_for_each_entry(ie, ilist, list) if (ie->inode->i_ino == ino) return ie->inode; - } return NULL; } static void add_gc_inode(struct inode *inode, struct list_head *ilist) { - struct list_head *this; - struct inode_entry *new_ie, *ie; + struct inode_entry *new_ie; - list_for_each(this, ilist) { - ie = list_entry(this, struct inode_entry, list); - if (ie->inode == inode) { - iput(inode); - return; - } + if (inode == find_gc_inode(inode->i_ino, ilist)) { + iput(inode); + return; } repeat: new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS); diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 91ac7f9d88ee..2b2d45d19e3e 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -109,12 +109,6 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino) ret = do_read_inode(inode); if (ret) goto bad_inode; - - if (!sbi->por_doing && inode->i_nlink == 0) { - ret = -ENOENT; - goto bad_inode; - } - make_now: if (ino == F2FS_NODE_INO(sbi)) { inode->i_mapping->a_ops = &f2fs_node_aops; @@ -130,8 +124,7 @@ make_now: inode->i_op = &f2fs_dir_inode_operations; inode->i_fop = &f2fs_dir_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; - mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER_MOVABLE | - __GFP_ZERO); + mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO); } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &f2fs_symlink_inode_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; @@ -199,6 +192,7 @@ void update_inode(struct inode *inode, struct page *node_page) set_cold_node(inode, node_page); set_page_dirty(node_page); + clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE); } int update_inode_page(struct inode *inode) @@ -224,6 +218,9 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) inode->i_ino == F2FS_META_INO(sbi)) return 0; + if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE)) + return 0; + if (wbc) f2fs_balance_fs(sbi); diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 47abc9722b17..64c07169df05 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -112,7 +112,7 @@ static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode, int count = le32_to_cpu(sbi->raw_super->extension_count); for (i = 0; i < count; i++) { if (is_multimedia_file(name, extlist[i])) { - set_cold_file(inode); + file_set_cold(inode); break; } } @@ -149,8 +149,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, 
alloc_nid_done(sbi, ino); - if (!sbi->por_doing) - d_instantiate(dentry, inode); + d_instantiate(dentry, inode); unlock_new_inode(inode); return 0; out: @@ -173,7 +172,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir, f2fs_balance_fs(sbi); inode->i_ctime = CURRENT_TIME; - atomic_inc(&inode->i_count); + ihold(inode); set_inode_flag(F2FS_I(inode), FI_INC_LINK); ilock = mutex_lock_op(sbi); @@ -182,17 +181,10 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir, if (err) goto out; - /* - * This file should be checkpointed during fsync. - * We lost i_pino from now on. - */ - set_cp_file(inode); - d_instantiate(dentry, inode); return 0; out: clear_inode_flag(F2FS_I(inode), FI_INC_LINK); - make_bad_inode(inode); iput(inode); return err; } @@ -498,6 +490,7 @@ const struct inode_operations f2fs_dir_inode_operations = { .rmdir = f2fs_rmdir, .mknod = f2fs_mknod, .rename = f2fs_rename, + .getattr = f2fs_getattr, .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, #ifdef CONFIG_F2FS_FS_XATTR @@ -512,6 +505,7 @@ const struct inode_operations f2fs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, + .getattr = f2fs_getattr, .setattr = f2fs_setattr, #ifdef CONFIG_F2FS_FS_XATTR .setxattr = generic_setxattr, @@ -522,6 +516,7 @@ const struct inode_operations f2fs_symlink_inode_operations = { }; const struct inode_operations f2fs_special_inode_operations = { + .getattr = f2fs_getattr, .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, #ifdef CONFIG_F2FS_FS_XATTR diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 3df43b4efd89..b418aee09573 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -408,10 +408,13 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) level = get_node_path(index, offset, noffset); nids[0] = dn->inode->i_ino; - npage[0] = get_node_page(sbi, nids[0]); - if (IS_ERR(npage[0])) - return PTR_ERR(npage[0]); + npage[0] = dn->inode_page; + if (!npage[0]) { + npage[0] = get_node_page(sbi, nids[0]); + if (IS_ERR(npage[0])) + return PTR_ERR(npage[0]); + } parent = npage[0]; if (level != 0) nids[1] = get_nid(parent, offset[0], true); @@ -430,7 +433,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) } dn->nid = nids[i]; - npage[i] = new_node_page(dn, noffset[i]); + npage[i] = new_node_page(dn, noffset[i], NULL); if (IS_ERR(npage[i])) { alloc_nid_failed(sbi, nids[i]); err = PTR_ERR(npage[i]); @@ -803,22 +806,19 @@ int remove_inode_page(struct inode *inode) return 0; } -int new_inode_page(struct inode *inode, const struct qstr *name) +struct page *new_inode_page(struct inode *inode, const struct qstr *name) { - struct page *page; struct dnode_of_data dn; /* allocate inode page for new inode */ set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); - page = new_node_page(&dn, 0); - init_dent_inode(name, page); - if (IS_ERR(page)) - return PTR_ERR(page); - f2fs_put_page(page, 1); - return 0; + + /* caller should f2fs_put_page(page, 1); */ + return new_node_page(&dn, 0, NULL); } -struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs) +struct page *new_node_page(struct dnode_of_data *dn, + unsigned int ofs, struct page *ipage) { struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); struct address_space *mapping = sbi->node_inode->i_mapping; @@ -851,7 +851,10 @@ struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs) set_cold_node(dn->inode, page); dn->node_page = page; - sync_inode_page(dn); + if (ipage) + 
update_inode(dn->inode, ipage); + else + sync_inode_page(dn); set_page_dirty(page); if (ofs == 0) inc_valid_inode_count(sbi); @@ -1205,7 +1208,8 @@ static int f2fs_set_node_page_dirty(struct page *page) return 0; } -static void f2fs_invalidate_node_page(struct page *page, unsigned long offset) +static void f2fs_invalidate_node_page(struct page *page, unsigned int offset, + unsigned int length) { struct inode *inode = page->mapping->host; struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); @@ -1492,9 +1496,10 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) new_ni = old_ni; new_ni.ino = ino; + if (!inc_valid_node_count(sbi, NULL, 1)) + WARN_ON(1); set_node_addr(sbi, &new_ni, NEW_ADDR); inc_valid_inode_count(sbi); - f2fs_put_page(ipage, 1); return 0; } diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index 0a2d72f0024d..c65fb4f4230f 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -275,25 +275,27 @@ static inline nid_t get_nid(struct page *p, int off, bool i) * - Mark cold node blocks in their node footer * - Mark cold data pages in page cache */ -static inline int is_cold_file(struct inode *inode) +static inline int is_file(struct inode *inode, int type) { - return F2FS_I(inode)->i_advise & FADVISE_COLD_BIT; + return F2FS_I(inode)->i_advise & type; } -static inline void set_cold_file(struct inode *inode) +static inline void set_file(struct inode *inode, int type) { - F2FS_I(inode)->i_advise |= FADVISE_COLD_BIT; + F2FS_I(inode)->i_advise |= type; } -static inline int is_cp_file(struct inode *inode) +static inline void clear_file(struct inode *inode, int type) { - return F2FS_I(inode)->i_advise & FADVISE_CP_BIT; + F2FS_I(inode)->i_advise &= ~type; } -static inline void set_cp_file(struct inode *inode) -{ - F2FS_I(inode)->i_advise |= FADVISE_CP_BIT; -} +#define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT) +#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT) +#define file_set_cold(inode) set_file(inode, FADVISE_COLD_BIT) +#define file_lost_pino(inode) set_file(inode, FADVISE_LOST_PINO_BIT) +#define file_clear_cold(inode) clear_file(inode, FADVISE_COLD_BIT) +#define file_got_pino(inode) clear_file(inode, FADVISE_LOST_PINO_BIT) static inline int is_cold_data(struct page *page) { @@ -310,29 +312,16 @@ static inline void clear_cold_data(struct page *page) ClearPageChecked(page); } -static inline int is_cold_node(struct page *page) +static inline int is_node(struct page *page, int type) { void *kaddr = page_address(page); struct f2fs_node *rn = (struct f2fs_node *)kaddr; - unsigned int flag = le32_to_cpu(rn->footer.flag); - return flag & (0x1 << COLD_BIT_SHIFT); + return le32_to_cpu(rn->footer.flag) & (1 << type); } -static inline unsigned char is_fsync_dnode(struct page *page) -{ - void *kaddr = page_address(page); - struct f2fs_node *rn = (struct f2fs_node *)kaddr; - unsigned int flag = le32_to_cpu(rn->footer.flag); - return flag & (0x1 << FSYNC_BIT_SHIFT); -} - -static inline unsigned char is_dent_dnode(struct page *page) -{ - void *kaddr = page_address(page); - struct f2fs_node *rn = (struct f2fs_node *)kaddr; - unsigned int flag = le32_to_cpu(rn->footer.flag); - return flag & (0x1 << DENT_BIT_SHIFT); -} +#define is_cold_node(page) is_node(page, COLD_BIT_SHIFT) +#define is_fsync_dnode(page) is_node(page, FSYNC_BIT_SHIFT) +#define is_dent_dnode(page) is_node(page, DENT_BIT_SHIFT) static inline void set_cold_node(struct inode *inode, struct page *page) { @@ -346,26 +335,15 @@ static inline void set_cold_node(struct inode *inode, struct page *page) 
rn->footer.flag = cpu_to_le32(flag); } -static inline void set_fsync_mark(struct page *page, int mark) +static inline void set_mark(struct page *page, int mark, int type) { - void *kaddr = page_address(page); - struct f2fs_node *rn = (struct f2fs_node *)kaddr; - unsigned int flag = le32_to_cpu(rn->footer.flag); - if (mark) - flag |= (0x1 << FSYNC_BIT_SHIFT); - else - flag &= ~(0x1 << FSYNC_BIT_SHIFT); - rn->footer.flag = cpu_to_le32(flag); -} - -static inline void set_dentry_mark(struct page *page, int mark) -{ - void *kaddr = page_address(page); - struct f2fs_node *rn = (struct f2fs_node *)kaddr; + struct f2fs_node *rn = (struct f2fs_node *)page_address(page); unsigned int flag = le32_to_cpu(rn->footer.flag); if (mark) - flag |= (0x1 << DENT_BIT_SHIFT); + flag |= (0x1 << type); else - flag &= ~(0x1 << DENT_BIT_SHIFT); + flag &= ~(0x1 << type); rn->footer.flag = cpu_to_le32(flag); } +#define set_dentry_mark(page, mark) set_mark(page, mark, DENT_BIT_SHIFT) +#define set_fsync_mark(page, mark) set_mark(page, mark, FSYNC_BIT_SHIFT) diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 60c8a5097058..d56d951c2253 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -40,36 +40,54 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head, static int recover_dentry(struct page *ipage, struct inode *inode) { - struct f2fs_node *raw_node = (struct f2fs_node *)kmap(ipage); + void *kaddr = page_address(ipage); + struct f2fs_node *raw_node = (struct f2fs_node *)kaddr; struct f2fs_inode *raw_inode = &(raw_node->i); - struct qstr name; + nid_t pino = le32_to_cpu(raw_inode->i_pino); struct f2fs_dir_entry *de; + struct qstr name; struct page *page; - struct inode *dir; + struct inode *dir, *einode; int err = 0; - if (!is_dent_dnode(ipage)) - goto out; - - dir = f2fs_iget(inode->i_sb, le32_to_cpu(raw_inode->i_pino)); - if (IS_ERR(dir)) { - err = PTR_ERR(dir); - goto out; + dir = check_dirty_dir_inode(F2FS_SB(inode->i_sb), pino); + if (!dir) { + dir = f2fs_iget(inode->i_sb, pino); + if (IS_ERR(dir)) { + err = PTR_ERR(dir); + goto out; + } + set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT); + add_dirty_dir_inode(dir); } name.len = le32_to_cpu(raw_inode->i_namelen); name.name = raw_inode->i_name; - +retry: de = f2fs_find_entry(dir, &name, &page); - if (de) { + if (de && inode->i_ino == le32_to_cpu(de->ino)) { kunmap(page); f2fs_put_page(page, 0); - } else { - err = __f2fs_add_link(dir, &name, inode); + goto out; + } + if (de) { + einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino)); + if (IS_ERR(einode)) { + WARN_ON(1); + if (PTR_ERR(einode) == -ENOENT) + err = -EEXIST; + goto out; + } + f2fs_delete_entry(de, page, einode); + iput(einode); + goto retry; } - iput(dir); + err = __f2fs_add_link(dir, &name, inode); out: - kunmap(ipage); + f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode and its dentry: " + "ino = %x, name = %s, dir = %lx, err = %d", + ino_of_node(ipage), raw_inode->i_name, + IS_ERR(dir) ? 
0 : dir->i_ino, err); return err; } @@ -79,6 +97,9 @@ static int recover_inode(struct inode *inode, struct page *node_page) struct f2fs_node *raw_node = (struct f2fs_node *)kaddr; struct f2fs_inode *raw_inode = &(raw_node->i); + if (!IS_INODE(node_page)) + return 0; + inode->i_mode = le16_to_cpu(raw_inode->i_mode); i_size_write(inode, le64_to_cpu(raw_inode->i_size)); inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime); @@ -88,7 +109,12 @@ static int recover_inode(struct inode *inode, struct page *node_page) inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec); inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); - return recover_dentry(node_page, inode); + if (is_dent_dnode(node_page)) + return recover_dentry(node_page, inode); + + f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s", + ino_of_node(node_page), raw_inode->i_name); + return 0; } static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) @@ -119,14 +145,13 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) lock_page(page); if (cp_ver != cpver_of_node(page)) - goto unlock_out; + break; if (!is_fsync_dnode(page)) goto next; entry = get_fsync_inode(head, ino_of_node(page)); if (entry) { - entry->blkaddr = blkaddr; if (IS_INODE(page) && is_dent_dnode(page)) set_inode_flag(F2FS_I(entry->inode), FI_INC_LINK); @@ -134,48 +159,40 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) if (IS_INODE(page) && is_dent_dnode(page)) { err = recover_inode_page(sbi, page); if (err) - goto unlock_out; + break; } /* add this fsync inode to the list */ entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS); if (!entry) { err = -ENOMEM; - goto unlock_out; + break; } entry->inode = f2fs_iget(sbi->sb, ino_of_node(page)); if (IS_ERR(entry->inode)) { err = PTR_ERR(entry->inode); kmem_cache_free(fsync_entry_slab, entry); - goto unlock_out; + break; } - list_add_tail(&entry->list, head); - entry->blkaddr = blkaddr; - } - if (IS_INODE(page)) { - err = recover_inode(entry->inode, page); - if (err == -ENOENT) { - goto next; - } else if (err) { - err = -EINVAL; - goto unlock_out; - } } + entry->blkaddr = blkaddr; + + err = recover_inode(entry->inode, page); + if (err && err != -ENOENT) + break; next: /* check next segment */ blkaddr = next_blkaddr_of_node(page); } -unlock_out: unlock_page(page); out: __free_pages(page, 0); return err; } -static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi, - struct list_head *head) +static void destroy_fsync_dnodes(struct list_head *head) { struct fsync_inode_entry *entry, *tmp; @@ -186,15 +203,15 @@ static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi, } } -static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi, - block_t blkaddr) +static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, + block_t blkaddr, struct dnode_of_data *dn) { struct seg_entry *sentry; unsigned int segno = GET_SEGNO(sbi, blkaddr); unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1); struct f2fs_summary sum; - nid_t ino; + nid_t ino, nid; void *kaddr; struct inode *inode; struct page *node_page; @@ -203,7 +220,7 @@ static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi, sentry = get_seg_entry(sbi, segno); if (!f2fs_test_bit(blkoff, sentry->cur_valid_map)) - return; + return 0; /* Get the previous summary */ for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) { @@ -222,20 +239,39 @@ static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi, 
f2fs_put_page(sum_page, 1); } + /* Use the locked dnode page and inode */ + nid = le32_to_cpu(sum.nid); + if (dn->inode->i_ino == nid) { + struct dnode_of_data tdn = *dn; + tdn.nid = nid; + tdn.node_page = dn->inode_page; + tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node); + truncate_data_blocks_range(&tdn, 1); + return 0; + } else if (dn->nid == nid) { + struct dnode_of_data tdn = *dn; + tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node); + truncate_data_blocks_range(&tdn, 1); + return 0; + } + /* Get the node page */ - node_page = get_node_page(sbi, le32_to_cpu(sum.nid)); + node_page = get_node_page(sbi, nid); + if (IS_ERR(node_page)) + return PTR_ERR(node_page); bidx = start_bidx_of_node(ofs_of_node(node_page)) + - le16_to_cpu(sum.ofs_in_node); + le16_to_cpu(sum.ofs_in_node); ino = ino_of_node(node_page); f2fs_put_page(node_page, 1); /* Deallocate previous index in the node page */ inode = f2fs_iget(sbi->sb, ino); if (IS_ERR(inode)) - return; + return PTR_ERR(inode); truncate_hole(inode, bidx, bidx + 1); iput(inode); + return 0; } static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, @@ -245,7 +281,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, struct dnode_of_data dn; struct f2fs_summary sum; struct node_info ni; - int err = 0; + int err = 0, recovered = 0; int ilock; start = start_bidx_of_node(ofs_of_node(page)); @@ -283,13 +319,16 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, } /* Check the previous node page having this index */ - check_index_in_prev_nodes(sbi, dest); + err = check_index_in_prev_nodes(sbi, dest, &dn); + if (err) + goto err; set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version); /* write dummy data page */ recover_data_page(sbi, NULL, &sum, src, dest); update_extent_cache(dest, &dn); + recovered++; } dn.ofs_in_node++; } @@ -305,9 +344,14 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, set_page_dirty(dn.node_page); recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr); +err: f2fs_put_dnode(&dn); mutex_unlock_op(sbi, ilock); - return 0; + + f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, " + "recovered_data = %d blocks, err = %d", + inode->i_ino, recovered, err); + return err; } static int recover_data(struct f2fs_sb_info *sbi, @@ -340,7 +384,7 @@ static int recover_data(struct f2fs_sb_info *sbi, lock_page(page); if (cp_ver != cpver_of_node(page)) - goto unlock_out; + break; entry = get_fsync_inode(head, ino_of_node(page)); if (!entry) @@ -348,7 +392,7 @@ static int recover_data(struct f2fs_sb_info *sbi, err = do_recover_data(sbi, entry->inode, page, blkaddr); if (err) - goto out; + break; if (entry->blkaddr == blkaddr) { iput(entry->inode); @@ -359,7 +403,6 @@ next: /* check next segment */ blkaddr = next_blkaddr_of_node(page); } -unlock_out: unlock_page(page); out: __free_pages(page, 0); @@ -382,6 +425,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi) INIT_LIST_HEAD(&inode_list); /* step #1: find fsynced inode numbers */ + sbi->por_doing = 1; err = find_fsync_dnodes(sbi, &inode_list); if (err) goto out; @@ -390,13 +434,13 @@ int recover_fsync_data(struct f2fs_sb_info *sbi) goto out; /* step #2: recover data */ - sbi->por_doing = 1; err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE); - sbi->por_doing = 0; BUG_ON(!list_empty(&inode_list)); out: - destroy_fsync_dnodes(sbi, &inode_list); + destroy_fsync_dnodes(&inode_list); kmem_cache_destroy(fsync_entry_slab); - write_checkpoint(sbi, false); + sbi->por_doing = 0; + if (!err) + 
write_checkpoint(sbi, false); return err; } diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index d8e84e49a5c3..a86d125a9885 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -94,7 +94,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, * Adding dirty entry into seglist is not critical operation. * If a given segment is one of current working segments, it won't be added. */ -void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) +static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); unsigned short valid_blocks; @@ -126,17 +126,16 @@ void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); - unsigned int segno, offset = 0; + unsigned int segno = -1; unsigned int total_segs = TOTAL_SEGS(sbi); mutex_lock(&dirty_i->seglist_lock); while (1) { segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs, - offset); + segno + 1); if (segno >= total_segs) break; __set_test_and_free(sbi, segno); - offset = segno + 1; } mutex_unlock(&dirty_i->seglist_lock); } @@ -144,17 +143,16 @@ static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) void clear_prefree_segments(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); - unsigned int segno, offset = 0; + unsigned int segno = -1; unsigned int total_segs = TOTAL_SEGS(sbi); mutex_lock(&dirty_i->seglist_lock); while (1) { segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs, - offset); + segno + 1); if (segno >= total_segs) break; - offset = segno + 1; if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE])) dirty_i->nr_dirty[PRE]--; @@ -257,11 +255,11 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) * This function should be resided under the curseg_mutex lock */ static void __add_sum_entry(struct f2fs_sb_info *sbi, int type, - struct f2fs_summary *sum, unsigned short offset) + struct f2fs_summary *sum) { struct curseg_info *curseg = CURSEG_I(sbi, type); void *addr = curseg->sum_blk; - addr += offset * sizeof(struct f2fs_summary); + addr += curseg->next_blkoff * sizeof(struct f2fs_summary); memcpy(addr, sum, sizeof(struct f2fs_summary)); return; } @@ -311,64 +309,14 @@ static void write_sum_page(struct f2fs_sb_info *sbi, f2fs_put_page(page, 1); } -static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi, int type) -{ - struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); - unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE]; - unsigned int segno; - unsigned int ofs = 0; - - /* - * If there is not enough reserved sections, - * we should not reuse prefree segments. - */ - if (has_not_enough_free_secs(sbi, 0)) - return NULL_SEGNO; - - /* - * NODE page should not reuse prefree segment, - * since those information is used for SPOR. 
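The two prefree-segment walks above (set_prefree_as_free_segments and clear_prefree_segments) drop the separate scan cursor and simply restart each find_next_bit() scan at segno + 1. A standalone C sketch of that loop shape, where find_next_bit() is a naive stand-in for the kernel helper and the bitmap contents are invented:

#include <stdio.h>

#define TOTAL_SEGS 64

/* Naive model of the kernel's find_next_bit(): first set bit >= start,
 * or "size" when nothing is found. */
static unsigned int find_next_bit(const unsigned long *map,
				  unsigned int size, unsigned int start)
{
	for (unsigned int i = start; i < size; i++)
		if (map[i / (8 * sizeof(long))] & (1UL << (i % (8 * sizeof(long)))))
			return i;
	return size;
}

int main(void)
{
	unsigned long dirty[TOTAL_SEGS / (8 * sizeof(long)) + 1] = { 0 };
	unsigned int segno = -1;	/* wraps to 0 on the first segno + 1 */

	dirty[0] = (1UL << 1) | (1UL << 5) | (1UL << 20);	/* prefree: 1, 5, 20 */

	while (1) {
		segno = find_next_bit(dirty, TOTAL_SEGS, segno + 1);
		if (segno >= TOTAL_SEGS)
			break;
		printf("freeing prefree segment %u\n", segno);
	}
	return 0;
}

Restarting at segno + 1 removes exactly the offset bookkeeping the hunks above delete, with no change in which bits are visited.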
- */ - if (IS_NODESEG(type)) - return NULL_SEGNO; -next: - segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs); - ofs += sbi->segs_per_sec; - - if (segno < TOTAL_SEGS(sbi)) { - int i; - - /* skip intermediate segments in a section */ - if (segno % sbi->segs_per_sec) - goto next; - - /* skip if the section is currently used */ - if (sec_usage_check(sbi, GET_SECNO(sbi, segno))) - goto next; - - /* skip if whole section is not prefree */ - for (i = 1; i < sbi->segs_per_sec; i++) - if (!test_bit(segno + i, prefree_segmap)) - goto next; - - /* skip if whole section was not free at the last checkpoint */ - for (i = 0; i < sbi->segs_per_sec; i++) - if (get_seg_entry(sbi, segno + i)->ckpt_valid_blocks) - goto next; - - return segno; - } - return NULL_SEGNO; -} - static int is_next_segment_free(struct f2fs_sb_info *sbi, int type) { struct curseg_info *curseg = CURSEG_I(sbi, type); - unsigned int segno = curseg->segno; + unsigned int segno = curseg->segno + 1; struct free_segmap_info *free_i = FREE_I(sbi); - if (segno + 1 < TOTAL_SEGS(sbi) && (segno + 1) % sbi->segs_per_sec) - return !test_bit(segno + 1, free_i->free_segmap); + if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec) + return !test_bit(segno, free_i->free_segmap); return 0; } @@ -495,7 +443,7 @@ static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec) int dir = ALLOC_LEFT; write_sum_page(sbi, curseg->sum_blk, - GET_SUM_BLOCK(sbi, curseg->segno)); + GET_SUM_BLOCK(sbi, segno)); if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA) dir = ALLOC_RIGHT; @@ -599,11 +547,7 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi, goto out; } - curseg->next_segno = check_prefree_segments(sbi, type); - - if (curseg->next_segno != NULL_SEGNO) - change_curseg(sbi, type, false); - else if (type == CURSEG_WARM_NODE) + if (type == CURSEG_WARM_NODE) new_curseg(sbi, type, false); else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type)) new_curseg(sbi, type, false); @@ -612,7 +556,10 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi, else new_curseg(sbi, type, false); out: +#ifdef CONFIG_F2FS_STAT_FS sbi->segment_count[curseg->alloc_type]++; +#endif + return; } void allocate_new_segments(struct f2fs_sb_info *sbi) @@ -795,7 +742,7 @@ static int __get_segment_type_6(struct page *page, enum page_type p_type) if (S_ISDIR(inode->i_mode)) return CURSEG_HOT_DATA; - else if (is_cold_data(page) || is_cold_file(inode)) + else if (is_cold_data(page) || file_is_cold(inode)) return CURSEG_COLD_DATA; else return CURSEG_WARM_DATA; @@ -844,11 +791,13 @@ static void do_write_page(struct f2fs_sb_info *sbi, struct page *page, * because, this function updates a summary entry in the * current summary block. 
*/ - __add_sum_entry(sbi, type, sum, curseg->next_blkoff); + __add_sum_entry(sbi, type, sum); mutex_lock(&sit_i->sentry_lock); __refresh_next_blkoff(sbi, curseg); +#ifdef CONFIG_F2FS_STAT_FS sbi->block_count[curseg->alloc_type]++; +#endif /* * SIT information should be updated before segment allocation, @@ -943,7 +892,7 @@ void recover_data_page(struct f2fs_sb_info *sbi, curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) & (sbi->blocks_per_seg - 1); - __add_sum_entry(sbi, type, sum, curseg->next_blkoff); + __add_sum_entry(sbi, type, sum); refresh_sit_entry(sbi, old_blkaddr, new_blkaddr); @@ -980,7 +929,7 @@ void rewrite_node_page(struct f2fs_sb_info *sbi, } curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) & (sbi->blocks_per_seg - 1); - __add_sum_entry(sbi, type, sum, curseg->next_blkoff); + __add_sum_entry(sbi, type, sum); /* change the current log to the next block addr in advance */ if (next_segno != segno) { @@ -1579,13 +1528,13 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct free_segmap_info *free_i = FREE_I(sbi); - unsigned int segno = 0, offset = 0; + unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi); unsigned short valid_blocks; - while (segno < TOTAL_SEGS(sbi)) { + while (1) { /* find dirty segment based on free segmap */ - segno = find_next_inuse(free_i, TOTAL_SEGS(sbi), offset); - if (segno >= TOTAL_SEGS(sbi)) + segno = find_next_inuse(free_i, total_segs, offset); + if (segno >= total_segs) break; offset = segno + 1; valid_blocks = get_valid_blocks(sbi, segno, 0); diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 8555f7df82c7..75c7dc363e92 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -34,7 +34,7 @@ static struct kmem_cache *f2fs_inode_cachep; enum { - Opt_gc_background_off, + Opt_gc_background, Opt_disable_roll_forward, Opt_discard, Opt_noheap, @@ -46,7 +46,7 @@ enum { }; static match_table_t f2fs_tokens = { - {Opt_gc_background_off, "background_gc_off"}, + {Opt_gc_background, "background_gc=%s"}, {Opt_disable_roll_forward, "disable_roll_forward"}, {Opt_discard, "discard"}, {Opt_noheap, "no_heap"}, @@ -76,6 +76,91 @@ static void init_once(void *foo) inode_init_once(&fi->vfs_inode); } +static int parse_options(struct super_block *sb, char *options) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); + substring_t args[MAX_OPT_ARGS]; + char *p, *name; + int arg = 0; + + if (!options) + return 0; + + while ((p = strsep(&options, ",")) != NULL) { + int token; + if (!*p) + continue; + /* + * Initialize args struct so we know whether arg was + * found; some options take optional arguments. 
+ */ + args[0].to = args[0].from = NULL; + token = match_token(p, f2fs_tokens, args); + + switch (token) { + case Opt_gc_background: + name = match_strdup(&args[0]); + + if (!name) + return -ENOMEM; + if (!strncmp(name, "on", 2)) + set_opt(sbi, BG_GC); + else if (!strncmp(name, "off", 3)) + clear_opt(sbi, BG_GC); + else { + kfree(name); + return -EINVAL; + } + kfree(name); + break; + case Opt_disable_roll_forward: + set_opt(sbi, DISABLE_ROLL_FORWARD); + break; + case Opt_discard: + set_opt(sbi, DISCARD); + break; + case Opt_noheap: + set_opt(sbi, NOHEAP); + break; +#ifdef CONFIG_F2FS_FS_XATTR + case Opt_nouser_xattr: + clear_opt(sbi, XATTR_USER); + break; +#else + case Opt_nouser_xattr: + f2fs_msg(sb, KERN_INFO, + "nouser_xattr options not supported"); + break; +#endif +#ifdef CONFIG_F2FS_FS_POSIX_ACL + case Opt_noacl: + clear_opt(sbi, POSIX_ACL); + break; +#else + case Opt_noacl: + f2fs_msg(sb, KERN_INFO, "noacl options not supported"); + break; +#endif + case Opt_active_logs: + if (args->from && match_int(args, &arg)) + return -EINVAL; + if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE) + return -EINVAL; + sbi->active_logs = arg; + break; + case Opt_disable_ext_identify: + set_opt(sbi, DISABLE_EXT_IDENTIFY); + break; + default: + f2fs_msg(sb, KERN_ERR, + "Unrecognized mount option \"%s\" or missing value", + p); + return -EINVAL; + } + } + return 0; +} + static struct inode *f2fs_alloc_inode(struct super_block *sb) { struct f2fs_inode_info *fi; @@ -112,6 +197,17 @@ static int f2fs_drop_inode(struct inode *inode) return generic_drop_inode(inode); } +/* + * f2fs_dirty_inode() is called from __mark_inode_dirty() + * + * We should call set_dirty_inode to write the dirty inode through write_inode. + */ +static void f2fs_dirty_inode(struct inode *inode, int flags) +{ + set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE); + return; +} + static void f2fs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); @@ -170,7 +266,7 @@ static int f2fs_freeze(struct super_block *sb) { int err; - if (sb->s_flags & MS_RDONLY) + if (f2fs_readonly(sb)) return 0; err = f2fs_sync_fs(sb, 1); @@ -214,10 +310,10 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) { struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb); - if (test_opt(sbi, BG_GC)) - seq_puts(seq, ",background_gc_on"); + if (!(root->d_sb->s_flags & MS_RDONLY) && test_opt(sbi, BG_GC)) + seq_printf(seq, ",background_gc=%s", "on"); else - seq_puts(seq, ",background_gc_off"); + seq_printf(seq, ",background_gc=%s", "off"); if (test_opt(sbi, DISABLE_ROLL_FORWARD)) seq_puts(seq, ",disable_roll_forward"); if (test_opt(sbi, DISCARD)) @@ -244,11 +340,64 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) return 0; } +static int f2fs_remount(struct super_block *sb, int *flags, char *data) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); + struct f2fs_mount_info org_mount_opt; + int err, active_logs; + + /* + * Save the old mount options in case we + * need to restore them. + */ + org_mount_opt = sbi->mount_opt; + active_logs = sbi->active_logs; + + /* parse mount options */ + err = parse_options(sb, data); + if (err) + goto restore_opts; + + /* + * Previous and new state of filesystem is RO, + * so no point in checking GC conditions. + */ + if ((sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) + goto skip; + + /* + * We stop the GC thread if FS is mounted as RO + * or if background_gc = off is passed in mount + * option. Also sync the filesystem. 
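The relocated parse_options() above now recognizes background_gc=%s via match_token(), replacing the old background_gc_off token. A minimal userspace model of the same splitting and dispatch, with plain string comparisons standing in for match_token()/match_strdup() and only two of the real f2fs option names carried over:

#include <stdio.h>
#include <string.h>

struct opts { int bg_gc; int discard; };

static int parse_options(char *options, struct opts *o)
{
	char *p;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;	/* skip empty tokens, as the kernel code does */
		if (!strncmp(p, "background_gc=", 14)) {
			const char *v = p + 14;
			if (!strcmp(v, "on"))
				o->bg_gc = 1;
			else if (!strcmp(v, "off"))
				o->bg_gc = 0;
			else
				return -1;	/* mirrors the -EINVAL path */
		} else if (!strcmp(p, "discard")) {
			o->discard = 1;
		} else {
			fprintf(stderr, "unrecognized option \"%s\"\n", p);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	char data[] = "background_gc=on,discard";
	struct opts o = { 0, 0 };

	if (parse_options(data, &o) == 0)
		printf("bg_gc=%d discard=%d\n", o.bg_gc, o.discard);
	return 0;
}

(strsep() is the same splitter the kernel code uses; on glibc it may require _DEFAULT_SOURCE.)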
+ */ + if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) { + if (sbi->gc_thread) { + stop_gc_thread(sbi); + f2fs_sync_fs(sb, 1); + } + } else if (test_opt(sbi, BG_GC) && !sbi->gc_thread) { + err = start_gc_thread(sbi); + if (err) + goto restore_opts; + } +skip: + /* Update the POSIXACL Flag */ + sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | + (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0); + return 0; + +restore_opts: + sbi->mount_opt = org_mount_opt; + sbi->active_logs = active_logs; + return err; +} + static struct super_operations f2fs_sops = { .alloc_inode = f2fs_alloc_inode, .drop_inode = f2fs_drop_inode, .destroy_inode = f2fs_destroy_inode, .write_inode = f2fs_write_inode, + .dirty_inode = f2fs_dirty_inode, .show_options = f2fs_show_options, .evict_inode = f2fs_evict_inode, .put_super = f2fs_put_super, @@ -256,6 +405,7 @@ static struct super_operations f2fs_sops = { .freeze_fs = f2fs_freeze, .unfreeze_fs = f2fs_unfreeze, .statfs = f2fs_statfs, + .remount_fs = f2fs_remount, }; static struct inode *f2fs_nfs_get_inode(struct super_block *sb, @@ -303,79 +453,6 @@ static const struct export_operations f2fs_export_ops = { .get_parent = f2fs_get_parent, }; -static int parse_options(struct super_block *sb, struct f2fs_sb_info *sbi, - char *options) -{ - substring_t args[MAX_OPT_ARGS]; - char *p; - int arg = 0; - - if (!options) - return 0; - - while ((p = strsep(&options, ",")) != NULL) { - int token; - if (!*p) - continue; - /* - * Initialize args struct so we know whether arg was - * found; some options take optional arguments. - */ - args[0].to = args[0].from = NULL; - token = match_token(p, f2fs_tokens, args); - - switch (token) { - case Opt_gc_background_off: - clear_opt(sbi, BG_GC); - break; - case Opt_disable_roll_forward: - set_opt(sbi, DISABLE_ROLL_FORWARD); - break; - case Opt_discard: - set_opt(sbi, DISCARD); - break; - case Opt_noheap: - set_opt(sbi, NOHEAP); - break; -#ifdef CONFIG_F2FS_FS_XATTR - case Opt_nouser_xattr: - clear_opt(sbi, XATTR_USER); - break; -#else - case Opt_nouser_xattr: - f2fs_msg(sb, KERN_INFO, - "nouser_xattr options not supported"); - break; -#endif -#ifdef CONFIG_F2FS_FS_POSIX_ACL - case Opt_noacl: - clear_opt(sbi, POSIX_ACL); - break; -#else - case Opt_noacl: - f2fs_msg(sb, KERN_INFO, "noacl options not supported"); - break; -#endif - case Opt_active_logs: - if (args->from && match_int(args, &arg)) - return -EINVAL; - if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE) - return -EINVAL; - sbi->active_logs = arg; - break; - case Opt_disable_ext_identify: - set_opt(sbi, DISABLE_EXT_IDENTIFY); - break; - default: - f2fs_msg(sb, KERN_ERR, - "Unrecognized mount option \"%s\" or missing value", - p); - return -EINVAL; - } - } - return 0; -} - static loff_t max_file_size(unsigned bits) { loff_t result = ADDRS_PER_INODE; @@ -541,6 +618,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) if (err) goto free_sb_buf; } + sb->s_fs_info = sbi; /* init some FS parameters */ sbi->active_logs = NR_CURSEG_TYPE; @@ -553,7 +631,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) set_opt(sbi, POSIX_ACL); #endif /* parse mount options */ - err = parse_options(sb, sbi, (char *)data); + err = parse_options(sb, (char *)data); if (err) goto free_sb_buf; @@ -565,7 +643,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) sb->s_xattr = f2fs_xattr_handlers; sb->s_export_op = &f2fs_export_ops; sb->s_magic = F2FS_SUPER_MAGIC; - sb->s_fs_info = sbi; sb->s_time_gran = 1; sb->s_flags = (sb->s_flags & 
~MS_POSIXACL) | (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0); @@ -674,10 +751,16 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) "Cannot recover all fsync data errno=%ld", err); } - /* After POR, we can run background GC thread */ - err = start_gc_thread(sbi); - if (err) - goto fail; + /* + * If filesystem is not mounted as read-only then + * do start the gc_thread. + */ + if (!(sb->s_flags & MS_RDONLY)) { + /* After POR, we can run background GC thread.*/ + err = start_gc_thread(sbi); + if (err) + goto fail; + } err = f2fs_build_stats(sbi); if (err) diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index 0b02dce31356..3ab07ecd86ca 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -20,6 +20,7 @@ */ #include <linux/rwsem.h> #include <linux/f2fs_fs.h> +#include <linux/security.h> #include "f2fs.h" #include "xattr.h" @@ -43,6 +44,10 @@ static size_t f2fs_xattr_generic_list(struct dentry *dentry, char *list, prefix = XATTR_TRUSTED_PREFIX; prefix_len = XATTR_TRUSTED_PREFIX_LEN; break; + case F2FS_XATTR_INDEX_SECURITY: + prefix = XATTR_SECURITY_PREFIX; + prefix_len = XATTR_SECURITY_PREFIX_LEN; + break; default: return -EINVAL; } @@ -50,7 +55,7 @@ static size_t f2fs_xattr_generic_list(struct dentry *dentry, char *list, total_len = prefix_len + name_len + 1; if (list && total_len <= list_size) { memcpy(list, prefix, prefix_len); - memcpy(list+prefix_len, name, name_len); + memcpy(list + prefix_len, name, name_len); list[prefix_len + name_len] = '\0'; } return total_len; @@ -70,13 +75,14 @@ static int f2fs_xattr_generic_get(struct dentry *dentry, const char *name, if (!capable(CAP_SYS_ADMIN)) return -EPERM; break; + case F2FS_XATTR_INDEX_SECURITY: + break; default: return -EINVAL; } if (strcmp(name, "") == 0) return -EINVAL; - return f2fs_getxattr(dentry->d_inode, type, name, - buffer, size); + return f2fs_getxattr(dentry->d_inode, type, name, buffer, size); } static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name, @@ -93,13 +99,15 @@ static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name, if (!capable(CAP_SYS_ADMIN)) return -EPERM; break; + case F2FS_XATTR_INDEX_SECURITY: + break; default: return -EINVAL; } if (strcmp(name, "") == 0) return -EINVAL; - return f2fs_setxattr(dentry->d_inode, type, name, value, size); + return f2fs_setxattr(dentry->d_inode, type, name, value, size, NULL); } static size_t f2fs_xattr_advise_list(struct dentry *dentry, char *list, @@ -145,6 +153,31 @@ static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name, return 0; } +#ifdef CONFIG_F2FS_FS_SECURITY +static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array, + void *page) +{ + const struct xattr *xattr; + int err = 0; + + for (xattr = xattr_array; xattr->name != NULL; xattr++) { + err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY, + xattr->name, xattr->value, + xattr->value_len, (struct page *)page); + if (err < 0) + break; + } + return err; +} + +int f2fs_init_security(struct inode *inode, struct inode *dir, + const struct qstr *qstr, struct page *ipage) +{ + return security_inode_init_security(inode, dir, qstr, + &f2fs_initxattrs, ipage); +} +#endif + const struct xattr_handler f2fs_xattr_user_handler = { .prefix = XATTR_USER_PREFIX, .flags = F2FS_XATTR_INDEX_USER, @@ -169,6 +202,14 @@ const struct xattr_handler f2fs_xattr_advise_handler = { .set = f2fs_xattr_advise_set, }; +const struct xattr_handler f2fs_xattr_security_handler = { + .prefix = XATTR_SECURITY_PREFIX, + .flags = F2FS_XATTR_INDEX_SECURITY, + 
.list = f2fs_xattr_generic_list, + .get = f2fs_xattr_generic_get, + .set = f2fs_xattr_generic_set, +}; + static const struct xattr_handler *f2fs_xattr_handler_map[] = { [F2FS_XATTR_INDEX_USER] = &f2fs_xattr_user_handler, #ifdef CONFIG_F2FS_FS_POSIX_ACL @@ -176,6 +217,9 @@ static const struct xattr_handler *f2fs_xattr_handler_map[] = { [F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT] = &f2fs_xattr_acl_default_handler, #endif [F2FS_XATTR_INDEX_TRUSTED] = &f2fs_xattr_trusted_handler, +#ifdef CONFIG_F2FS_FS_SECURITY + [F2FS_XATTR_INDEX_SECURITY] = &f2fs_xattr_security_handler, +#endif [F2FS_XATTR_INDEX_ADVISE] = &f2fs_xattr_advise_handler, }; @@ -186,6 +230,9 @@ const struct xattr_handler *f2fs_xattr_handlers[] = { &f2fs_xattr_acl_default_handler, #endif &f2fs_xattr_trusted_handler, +#ifdef CONFIG_F2FS_FS_SECURITY + &f2fs_xattr_security_handler, +#endif &f2fs_xattr_advise_handler, NULL, }; @@ -218,6 +265,8 @@ int f2fs_getxattr(struct inode *inode, int name_index, const char *name, return -ENODATA; page = get_node_page(sbi, fi->i_xattr_nid); + if (IS_ERR(page)) + return PTR_ERR(page); base_addr = page_address(page); list_for_each_xattr(entry, base_addr) { @@ -268,6 +317,8 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) return 0; page = get_node_page(sbi, fi->i_xattr_nid); + if (IS_ERR(page)) + return PTR_ERR(page); base_addr = page_address(page); list_for_each_xattr(entry, base_addr) { @@ -296,7 +347,7 @@ cleanup: } int f2fs_setxattr(struct inode *inode, int name_index, const char *name, - const void *value, size_t value_len) + const void *value, size_t value_len, struct page *ipage) { struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); struct f2fs_inode_info *fi = F2FS_I(inode); @@ -335,7 +386,7 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name, set_new_dnode(&dn, inode, NULL, NULL, fi->i_xattr_nid); mark_inode_dirty(inode); - page = new_node_page(&dn, XATTR_NODE_OFFSET); + page = new_node_page(&dn, XATTR_NODE_OFFSET, ipage); if (IS_ERR(page)) { alloc_nid_failed(sbi, fi->i_xattr_nid); fi->i_xattr_nid = 0; @@ -435,7 +486,10 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name, inode->i_ctime = CURRENT_TIME; clear_inode_flag(fi, FI_ACL_MODE); } - update_inode_page(inode); + if (ipage) + update_inode(inode, ipage); + else + update_inode_page(inode); mutex_unlock_op(sbi, ilock); return 0; diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h index 49c9558305e3..3c0817bef25d 100644 --- a/fs/f2fs/xattr.h +++ b/fs/f2fs/xattr.h @@ -112,21 +112,19 @@ extern const struct xattr_handler f2fs_xattr_trusted_handler; extern const struct xattr_handler f2fs_xattr_acl_access_handler; extern const struct xattr_handler f2fs_xattr_acl_default_handler; extern const struct xattr_handler f2fs_xattr_advise_handler; +extern const struct xattr_handler f2fs_xattr_security_handler; extern const struct xattr_handler *f2fs_xattr_handlers[]; -extern int f2fs_setxattr(struct inode *inode, int name_index, const char *name, - const void *value, size_t value_len); -extern int f2fs_getxattr(struct inode *inode, int name_index, const char *name, - void *buffer, size_t buffer_size); -extern ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, - size_t buffer_size); - +extern int f2fs_setxattr(struct inode *, int, const char *, + const void *, size_t, struct page *); +extern int f2fs_getxattr(struct inode *, int, const char *, void *, size_t); +extern ssize_t f2fs_listxattr(struct dentry *, char *, size_t); #else #define f2fs_xattr_handlers NULL static inline int 
f2fs_setxattr(struct inode *inode, int name_index, - const char *name, const void *value, size_t value_len) + const char *name, const void *value, size_t value_len) { return -EOPNOTSUPP; } @@ -142,4 +140,14 @@ static inline ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, } #endif +#ifdef CONFIG_F2FS_FS_SECURITY +extern int f2fs_init_security(struct inode *, struct inode *, + const struct qstr *, struct page *); +#else +static inline int f2fs_init_security(struct inode *inode, struct inode *dir, + const struct qstr *qstr, struct page *ipage) +{ + return 0; +} +#endif #endif /* __F2FS_XATTR_H__ */ diff --git a/fs/fat/dir.c b/fs/fat/dir.c index 7a6f02caf286..3963ede84eb0 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c @@ -543,6 +543,7 @@ end_of_dir: EXPORT_SYMBOL_GPL(fat_search_long); struct fat_ioctl_filldir_callback { + struct dir_context ctx; void __user *dirent; int result; /* for dir ioctl */ @@ -552,8 +553,9 @@ struct fat_ioctl_filldir_callback { int short_len; }; -static int __fat_readdir(struct inode *inode, struct file *filp, void *dirent, - filldir_t filldir, int short_only, int both) +static int __fat_readdir(struct inode *inode, struct file *file, + struct dir_context *ctx, int short_only, + struct fat_ioctl_filldir_callback *both) { struct super_block *sb = inode->i_sb; struct msdos_sb_info *sbi = MSDOS_SB(sb); @@ -564,27 +566,20 @@ static int __fat_readdir(struct inode *inode, struct file *filp, void *dirent, unsigned char bufname[FAT_MAX_SHORT_SIZE]; int isvfat = sbi->options.isvfat; const char *fill_name = NULL; - unsigned long inum; - unsigned long lpos, dummy, *furrfu = &lpos; + int fake_offset = 0; loff_t cpos; int short_len = 0, fill_len = 0; int ret = 0; mutex_lock(&sbi->s_lock); - cpos = filp->f_pos; + cpos = ctx->pos; /* Fake . and .. for the root directory. */ if (inode->i_ino == MSDOS_ROOT_INO) { - while (cpos < 2) { - if (filldir(dirent, "..", cpos+1, cpos, - MSDOS_ROOT_INO, DT_DIR) < 0) - goto out; - cpos++; - filp->f_pos++; - } - if (cpos == 2) { - dummy = 2; - furrfu = &dummy; + if (!dir_emit_dots(file, ctx)) + goto out; + if (ctx->pos == 2) { + fake_offset = 1; cpos = 0; } } @@ -619,7 +614,7 @@ parse_record: int status = fat_parse_long(inode, &cpos, &bh, &de, &unicode, &nr_slots); if (status < 0) { - filp->f_pos = cpos; + ctx->pos = cpos; ret = status; goto out; } else if (status == PARSE_INVALID) @@ -639,6 +634,19 @@ parse_record: /* !both && !short_only, so we don't need shortname. 
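Every readdir conversion in this diff (9p, fat, vxfs) follows the same recipe: the (dirent, filldir) pair becomes a struct dir_context, the position moves from file->f_pos to ctx->pos, and entries are reported through dir_emit(), which returns false when the caller's buffer is full. A kernel-style sketch of the minimal form, with hypothetical myfs names; this is a fragment against the 3.11 VFS interfaces shown in this diff, not a standalone program:

static int myfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);

	/* Emit "." and ".." for positions 0 and 1, advancing ctx->pos. */
	if (!dir_emit_dots(file, ctx))
		return 0;

	/* One invented entry at position 2. */
	if (ctx->pos == 2) {
		if (!dir_emit(ctx, "hello", 5, inode->i_ino + 1, DT_REG))
			return 0;	/* buffer full: resume here on the next call */
		ctx->pos++;
	}
	return 0;
}

const struct file_operations myfs_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= myfs_readdir,
};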
*/ if (!both) goto start_filldir; + + short_len = fat_parse_short(sb, de, bufname, + sbi->options.dotsOK); + if (short_len == 0) + goto record_end; + /* hack for fat_ioctl_filldir() */ + both->longname = fill_name; + both->long_len = fill_len; + both->shortname = bufname; + both->short_len = short_len; + fill_name = NULL; + fill_len = 0; + goto start_filldir; } } @@ -646,28 +654,21 @@ parse_record: if (short_len == 0) goto record_end; - if (nr_slots) { - /* hack for fat_ioctl_filldir() */ - struct fat_ioctl_filldir_callback *p = dirent; - - p->longname = fill_name; - p->long_len = fill_len; - p->shortname = bufname; - p->short_len = short_len; - fill_name = NULL; - fill_len = 0; - } else { - fill_name = bufname; - fill_len = short_len; - } + fill_name = bufname; + fill_len = short_len; start_filldir: - lpos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry); - if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) - inum = inode->i_ino; - else if (!memcmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) { - inum = parent_ino(filp->f_path.dentry); + if (!fake_offset) + ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry); + + if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) { + if (!dir_emit_dot(file, ctx)) + goto fill_failed; + } else if (!memcmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) { + if (!dir_emit_dotdot(file, ctx)) + goto fill_failed; } else { + unsigned long inum; loff_t i_pos = fat_make_i_pos(sb, bh, de); struct inode *tmp = fat_iget(sb, i_pos); if (tmp) { @@ -675,18 +676,17 @@ start_filldir: iput(tmp); } else inum = iunique(sb, MSDOS_ROOT_INO); + if (!dir_emit(ctx, fill_name, fill_len, inum, + (de->attr & ATTR_DIR) ? DT_DIR : DT_REG)) + goto fill_failed; } - if (filldir(dirent, fill_name, fill_len, *furrfu, inum, - (de->attr & ATTR_DIR) ? DT_DIR : DT_REG) < 0) - goto fill_failed; - record_end: - furrfu = &lpos; - filp->f_pos = cpos; + fake_offset = 0; + ctx->pos = cpos; goto get_new; end_of_dir: - filp->f_pos = cpos; + ctx->pos = cpos; fill_failed: brelse(bh); if (unicode) @@ -696,10 +696,9 @@ out: return ret; } -static int fat_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int fat_readdir(struct file *file, struct dir_context *ctx) { - struct inode *inode = file_inode(filp); - return __fat_readdir(inode, filp, dirent, filldir, 0, 0); + return __fat_readdir(file_inode(file), file, ctx, 0, NULL); } #define FAT_IOCTL_FILLDIR_FUNC(func, dirent_type) \ @@ -755,20 +754,25 @@ efault: \ FAT_IOCTL_FILLDIR_FUNC(fat_ioctl_filldir, __fat_dirent) -static int fat_ioctl_readdir(struct inode *inode, struct file *filp, +static int fat_ioctl_readdir(struct inode *inode, struct file *file, void __user *dirent, filldir_t filldir, int short_only, int both) { - struct fat_ioctl_filldir_callback buf; + struct fat_ioctl_filldir_callback buf = { + .ctx.actor = filldir, + .dirent = dirent + }; int ret; buf.dirent = dirent; buf.result = 0; mutex_lock(&inode->i_mutex); + buf.ctx.pos = file->f_pos; ret = -ENOENT; if (!IS_DEADDIR(inode)) { - ret = __fat_readdir(inode, filp, &buf, filldir, - short_only, both); + ret = __fat_readdir(inode, file, &buf.ctx, + short_only, both ? 
&buf : NULL); + file->f_pos = buf.ctx.pos; } mutex_unlock(&inode->i_mutex); if (ret >= 0) @@ -854,7 +858,7 @@ static long fat_compat_dir_ioctl(struct file *filp, unsigned cmd, const struct file_operations fat_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = fat_readdir, + .iterate = fat_readdir, .unlocked_ioctl = fat_dir_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = fat_compat_dir_ioctl, diff --git a/fs/fat/misc.c b/fs/fat/misc.c index 359d307b5507..628e22a5a543 100644 --- a/fs/fat/misc.c +++ b/fs/fat/misc.c @@ -30,7 +30,7 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...) va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; - printk(KERN_ERR "FAT-fs (%s): error, %pV\n", sb->s_id, &vaf); + fat_msg(sb, KERN_ERR, "error, %pV", &vaf); va_end(args); } @@ -38,8 +38,7 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...) panic("FAT-fs (%s): fs panic from previous error\n", sb->s_id); else if (opts->errors == FAT_ERRORS_RO && !(sb->s_flags & MS_RDONLY)) { sb->s_flags |= MS_RDONLY; - printk(KERN_ERR "FAT-fs (%s): Filesystem has been " - "set read-only\n", sb->s_id); + fat_msg(sb, KERN_ERR, "Filesystem has been set read-only"); } } EXPORT_SYMBOL_GPL(__fat_fs_error); diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c index 081b759cff83..a783b0e1272a 100644 --- a/fs/fat/namei_msdos.c +++ b/fs/fat/namei_msdos.c @@ -148,8 +148,7 @@ static int msdos_find(struct inode *dir, const unsigned char *name, int len, * that the existing dentry can be used. The msdos fs routines will * return ENOENT or EINVAL as appropriate. */ -static int msdos_hash(const struct dentry *dentry, const struct inode *inode, - struct qstr *qstr) +static int msdos_hash(const struct dentry *dentry, struct qstr *qstr) { struct fat_mount_options *options = &MSDOS_SB(dentry->d_sb)->options; unsigned char msdos_name[MSDOS_NAME]; @@ -165,8 +164,7 @@ static int msdos_hash(const struct dentry *dentry, const struct inode *inode, * Compare two msdos names. If either of the names are invalid, * we fall back to doing the standard name comparison. */ -static int msdos_cmp(const struct dentry *parent, const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +static int msdos_cmp(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { struct fat_mount_options *options = &MSDOS_SB(parent->d_sb)->options; diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index 2da952036a3d..6df8d3d885e5 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -107,8 +107,7 @@ static unsigned int vfat_striptail_len(const struct qstr *qstr) * that the existing dentry can be used. The vfat fs routines will * return ENOENT or EINVAL as appropriate. */ -static int vfat_hash(const struct dentry *dentry, const struct inode *inode, - struct qstr *qstr) +static int vfat_hash(const struct dentry *dentry, struct qstr *qstr) { qstr->hash = full_name_hash(qstr->name, vfat_striptail_len(qstr)); return 0; @@ -120,8 +119,7 @@ static int vfat_hash(const struct dentry *dentry, const struct inode *inode, * that the existing dentry can be used. The vfat fs routines will * return ENOENT or EINVAL as appropriate. 
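The msdos and vfat hunks here track the parallel VFS change that removes the inode arguments from ->d_hash() and ->d_compare(); the parent dentry alone now identifies the filesystem context. The new prototypes in isolation, with a hypothetical myfs and an illustrative comparison body:

static int myfs_d_hash(const struct dentry *dentry, struct qstr *qstr)
{
	qstr->hash = full_name_hash(qstr->name, qstr->len);
	return 0;
}

static int myfs_d_compare(const struct dentry *parent,
			  const struct dentry *dentry,
			  unsigned int len, const char *str,
			  const struct qstr *name)
{
	if (len != name->len)
		return 1;	/* non-zero means "no match" */
	return !!memcmp(str, name->name, len);
}

static const struct dentry_operations myfs_dentry_ops = {
	.d_hash		= myfs_d_hash,
	.d_compare	= myfs_d_compare,
};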
*/ -static int vfat_hashi(const struct dentry *dentry, const struct inode *inode, - struct qstr *qstr) +static int vfat_hashi(const struct dentry *dentry, struct qstr *qstr) { struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io; const unsigned char *name; @@ -142,8 +140,7 @@ static int vfat_hashi(const struct dentry *dentry, const struct inode *inode, /* * Case insensitive compare of two vfat names. */ -static int vfat_cmpi(const struct dentry *parent, const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +static int vfat_cmpi(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io; @@ -162,8 +159,7 @@ static int vfat_cmpi(const struct dentry *parent, const struct inode *pinode, /* * Case sensitive compare of two vfat names. */ -static int vfat_cmp(const struct dentry *parent, const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +static int vfat_cmp(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { unsigned int alen, blen; diff --git a/fs/file_table.c b/fs/file_table.c index cd4d87a82951..08e719b884ca 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -227,7 +227,7 @@ static void __fput(struct file *file) { struct dentry *dentry = file->f_path.dentry; struct vfsmount *mnt = file->f_path.mnt; - struct inode *inode = dentry->d_inode; + struct inode *inode = file->f_inode; might_sleep(); @@ -306,17 +306,18 @@ void fput(struct file *file) { if (atomic_long_dec_and_test(&file->f_count)) { struct task_struct *task = current; + unsigned long flags; + file_sb_list_del(file); - if (unlikely(in_interrupt() || task->flags & PF_KTHREAD)) { - unsigned long flags; - spin_lock_irqsave(&delayed_fput_lock, flags); - list_add(&file->f_u.fu_list, &delayed_fput_list); - schedule_work(&delayed_fput_work); - spin_unlock_irqrestore(&delayed_fput_lock, flags); - return; + if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) { + init_task_work(&file->f_u.fu_rcuhead, ____fput); + if (!task_work_add(task, &file->f_u.fu_rcuhead, true)) + return; } - init_task_work(&file->f_u.fu_rcuhead, ____fput); - task_work_add(task, &file->f_u.fu_rcuhead, true); + spin_lock_irqsave(&delayed_fput_lock, flags); + list_add(&file->f_u.fu_list, &delayed_fput_list); + schedule_work(&delayed_fput_work); + spin_unlock_irqrestore(&delayed_fput_lock, flags); } } diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c index 664b07a53870..25d4099a4aea 100644 --- a/fs/freevxfs/vxfs_lookup.c +++ b/fs/freevxfs/vxfs_lookup.c @@ -49,7 +49,7 @@ static struct dentry * vxfs_lookup(struct inode *, struct dentry *, unsigned int); -static int vxfs_readdir(struct file *, void *, filldir_t); +static int vxfs_readdir(struct file *, struct dir_context *); const struct inode_operations vxfs_dir_inode_ops = { .lookup = vxfs_lookup, @@ -58,7 +58,7 @@ const struct inode_operations vxfs_dir_inode_ops = { const struct file_operations vxfs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = vxfs_readdir, + .iterate = vxfs_readdir, }; @@ -235,7 +235,7 @@ vxfs_lookup(struct inode *dip, struct dentry *dp, unsigned int flags) * Zero. 
*/ static int -vxfs_readdir(struct file *fp, void *retp, filldir_t filler) +vxfs_readdir(struct file *fp, struct dir_context *ctx) { struct inode *ip = file_inode(fp); struct super_block *sbp = ip->i_sb; @@ -243,20 +243,17 @@ vxfs_readdir(struct file *fp, void *retp, filldir_t filler) u_long page, npages, block, pblocks, nblocks, offset; loff_t pos; - switch ((long)fp->f_pos) { - case 0: - if (filler(retp, ".", 1, fp->f_pos, ip->i_ino, DT_DIR) < 0) - goto out; - fp->f_pos++; - /* fallthrough */ - case 1: - if (filler(retp, "..", 2, fp->f_pos, VXFS_INO(ip)->vii_dotdot, DT_DIR) < 0) - goto out; - fp->f_pos++; - /* fallthrough */ + if (ctx->pos == 0) { + if (!dir_emit_dot(fp, ctx)) + return 0; + ctx->pos = 1; } - - pos = fp->f_pos - 2; + if (ctx->pos == 1) { + if (!dir_emit(ctx, "..", 2, VXFS_INO(ip)->vii_dotdot, DT_DIR)) + return 0; + ctx->pos = 2; + } + pos = ctx->pos - 2; if (pos > VXFS_DIRROUND(ip->i_size)) return 0; @@ -270,16 +267,16 @@ vxfs_readdir(struct file *fp, void *retp, filldir_t filler) block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks; for (; page < npages; page++, block = 0) { - caddr_t kaddr; + char *kaddr; struct page *pp; pp = vxfs_get_page(ip->i_mapping, page); if (IS_ERR(pp)) continue; - kaddr = (caddr_t)page_address(pp); + kaddr = (char *)page_address(pp); for (; block <= nblocks && block <= pblocks; block++) { - caddr_t baddr, limit; + char *baddr, *limit; struct vxfs_dirblk *dbp; struct vxfs_direct *de; @@ -292,21 +289,18 @@ vxfs_readdir(struct file *fp, void *retp, filldir_t filler) (kaddr + offset) : (baddr + VXFS_DIRBLKOV(dbp))); - for (; (caddr_t)de <= limit; de = vxfs_next_entry(de)) { - int over; - + for (; (char *)de <= limit; de = vxfs_next_entry(de)) { if (!de->d_reclen) break; if (!de->d_ino) continue; - offset = (caddr_t)de - kaddr; - over = filler(retp, de->d_name, de->d_namelen, - ((page << PAGE_CACHE_SHIFT) | offset) + 2, - de->d_ino, DT_UNKNOWN); - if (over) { + offset = (char *)de - kaddr; + ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2; + if (!dir_emit(ctx, de->d_name, de->d_namelen, + de->d_ino, DT_UNKNOWN)) { vxfs_put_page(pp); - goto done; + return 0; } } offset = 0; @@ -314,9 +308,6 @@ vxfs_readdir(struct file *fp, void *retp, filldir_t filler) vxfs_put_page(pp); offset = 0; } - -done: - fp->f_pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2; -out: + ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2; return 0; } diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 3be57189efd5..a85ac4e33436 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -45,6 +45,7 @@ struct wb_writeback_work { unsigned int for_kupdate:1; unsigned int range_cyclic:1; unsigned int for_background:1; + unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ enum wb_reason reason; /* why was writeback initiated? */ struct list_head list; /* pending work list */ @@ -443,9 +444,11 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) /* * Make sure to wait on the data before writing out the metadata. * This is important for filesystems that modify metadata on data - * I/O completion. + * I/O completion. We don't do it for sync(2) writeback because it has a + * separate, external IO completion path and ->sync_fs for guaranteeing + * inode metadata is written back correctly. 
*/ - if (wbc->sync_mode == WB_SYNC_ALL) { + if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) { int err = filemap_fdatawait(mapping); if (ret == 0) ret = err; @@ -578,6 +581,7 @@ static long writeback_sb_inodes(struct super_block *sb, .tagged_writepages = work->tagged_writepages, .for_kupdate = work->for_kupdate, .for_background = work->for_background, + .for_sync = work->for_sync, .range_cyclic = work->range_cyclic, .range_start = 0, .range_end = LLONG_MAX, @@ -1362,6 +1366,7 @@ void sync_inodes_sb(struct super_block *sb) .range_cyclic = 0, .done = &done, .reason = WB_REASON_SYNC, + .for_sync = 1, }; /* Nothing to do? */ diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c index b52aed1dca97..f7cff367db7f 100644 --- a/fs/fscache/cache.c +++ b/fs/fscache/cache.c @@ -115,7 +115,7 @@ struct fscache_cache *fscache_select_cache_for_object( struct fscache_object, cookie_link); cache = object->cache; - if (object->state >= FSCACHE_OBJECT_DYING || + if (fscache_object_is_dying(object) || test_bit(FSCACHE_IOERROR, &cache->flags)) cache = NULL; @@ -224,8 +224,10 @@ int fscache_add_cache(struct fscache_cache *cache, BUG_ON(!ifsdef); cache->flags = 0; - ifsdef->event_mask = ULONG_MAX & ~(1 << FSCACHE_OBJECT_EV_CLEARED); - ifsdef->state = FSCACHE_OBJECT_ACTIVE; + ifsdef->event_mask = + ((1 << NR_FSCACHE_OBJECT_EVENTS) - 1) & + ~(1 << FSCACHE_OBJECT_EV_CLEARED); + __set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &ifsdef->flags); if (!tagname) tagname = cache->identifier; @@ -330,25 +332,25 @@ static void fscache_withdraw_all_objects(struct fscache_cache *cache, { struct fscache_object *object; - spin_lock(&cache->object_list_lock); - while (!list_empty(&cache->object_list)) { - object = list_entry(cache->object_list.next, - struct fscache_object, cache_link); - list_move_tail(&object->cache_link, dying_objects); + spin_lock(&cache->object_list_lock); - _debug("withdraw %p", object->cookie); + if (!list_empty(&cache->object_list)) { + object = list_entry(cache->object_list.next, + struct fscache_object, cache_link); + list_move_tail(&object->cache_link, dying_objects); - spin_lock(&object->lock); - spin_unlock(&cache->object_list_lock); - fscache_raise_event(object, FSCACHE_OBJECT_EV_WITHDRAW); - spin_unlock(&object->lock); + _debug("withdraw %p", object->cookie); + + /* This must be done under object_list_lock to prevent + * a race with fscache_drop_object(). + */ + fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL); + } + spin_unlock(&cache->object_list_lock); cond_resched(); - spin_lock(&cache->object_list_lock); } - - spin_unlock(&cache->object_list_lock); } /** diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c index e2cba1f60c21..0e91a3c9fdb2 100644 --- a/fs/fscache/cookie.c +++ b/fs/fscache/cookie.c @@ -95,6 +95,11 @@ struct fscache_cookie *__fscache_acquire_cookie( atomic_set(&cookie->usage, 1); atomic_set(&cookie->n_children, 0); + /* We keep the active count elevated until relinquishment to prevent an + * attempt to wake up every time the object operations queue quiesces. 
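The cookie comment above introduces the new n_active discipline: the counter is born at 1, users bump and drop it around netfs access, and only relinquishment drops the initial reference and then waits for the counter to drain (the dec-and-wait is visible in __fscache_relinquish_cookie() further down). A standalone C11 model of the pattern, spinning where the kernel would sleep in wait_on_atomic_t():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int n_active = 1;	/* elevated until relinquishment */

static void use_cookie(void)   { atomic_fetch_add(&n_active, 1); }
static void unuse_cookie(void) { atomic_fetch_sub(&n_active, 1); }

static void relinquish_cookie(void)
{
	/* Drop the initial reference, then wait for active users to drain. */
	if (atomic_fetch_sub(&n_active, 1) - 1 != 0)
		while (atomic_load(&n_active) != 0)
			;	/* kernel: wait_on_atomic_t(), not a busy wait */
	printf("cookie quiesced; safe to clear netfs pointers\n");
}

int main(void)
{
	use_cookie();
	unuse_cookie();
	relinquish_cookie();
	return 0;
}

Starting at 1 means the counter never touches zero while the cookie is live, so nothing is spuriously woken each time activity quiesces, which is the point the comment above makes.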
+ */ + atomic_set(&cookie->n_active, 1); + atomic_inc(&parent->usage); atomic_inc(&parent->n_children); @@ -177,7 +182,6 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie) cookie->flags = (1 << FSCACHE_COOKIE_LOOKING_UP) | - (1 << FSCACHE_COOKIE_CREATING) | (1 << FSCACHE_COOKIE_NO_DATA_YET); /* ask the cache to allocate objects for this cookie and its parent @@ -205,7 +209,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie) /* initiate the process of looking up all the objects in the chain * (done by fscache_initialise_object()) */ - fscache_enqueue_object(object); + fscache_raise_event(object, FSCACHE_OBJECT_EV_NEW_CHILD); spin_unlock(&cookie->lock); @@ -285,7 +289,7 @@ static int fscache_alloc_object(struct fscache_cache *cache, object_already_extant: ret = -ENOBUFS; - if (object->state >= FSCACHE_OBJECT_DYING) { + if (fscache_object_is_dead(object)) { spin_unlock(&cookie->lock); goto error; } @@ -321,7 +325,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie, ret = -EEXIST; hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) { if (p->cache == object->cache) { - if (p->state >= FSCACHE_OBJECT_DYING) + if (fscache_object_is_dying(p)) ret = -ENOBUFS; goto cant_attach_object; } @@ -332,7 +336,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie, hlist_for_each_entry(p, &cookie->parent->backing_objects, cookie_link) { if (p->cache == object->cache) { - if (p->state >= FSCACHE_OBJECT_DYING) { + if (fscache_object_is_dying(p)) { ret = -ENOBUFS; spin_unlock(&cookie->parent->lock); goto cant_attach_object; @@ -400,7 +404,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie) object = hlist_entry(cookie->backing_objects.first, struct fscache_object, cookie_link); - if (object->state < FSCACHE_OBJECT_DYING) + if (fscache_object_is_live(object)) fscache_raise_event( object, FSCACHE_OBJECT_EV_INVALIDATE); } @@ -467,9 +471,7 @@ EXPORT_SYMBOL(__fscache_update_cookie); */ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) { - struct fscache_cache *cache; struct fscache_object *object; - unsigned long event; fscache_stat(&fscache_n_relinquishes); if (retire) @@ -481,8 +483,11 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) return; } - _enter("%p{%s,%p},%d", - cookie, cookie->def->name, cookie->netfs_data, retire); + _enter("%p{%s,%p,%d},%d", + cookie, cookie->def->name, cookie->netfs_data, + atomic_read(&cookie->n_active), retire); + + ASSERTCMP(atomic_read(&cookie->n_active), >, 0); if (atomic_read(&cookie->n_children) != 0) { printk(KERN_ERR "FS-Cache: Cookie '%s' still has children\n", @@ -490,62 +495,28 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) BUG(); } - /* wait for the cookie to finish being instantiated (or to fail) */ - if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) { - fscache_stat(&fscache_n_relinquishes_waitcrt); - wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING, - fscache_wait_bit, TASK_UNINTERRUPTIBLE); - } - - event = retire ? 
FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE; + /* No further netfs-accessing operations on this cookie permitted */ + set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags); + if (retire) + set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags); -try_again: spin_lock(&cookie->lock); - - /* break links with all the active objects */ - while (!hlist_empty(&cookie->backing_objects)) { - int n_reads; - object = hlist_entry(cookie->backing_objects.first, - struct fscache_object, - cookie_link); - - _debug("RELEASE OBJ%x", object->debug_id); - - set_bit(FSCACHE_COOKIE_WAITING_ON_READS, &cookie->flags); - n_reads = atomic_read(&object->n_reads); - if (n_reads) { - int n_ops = object->n_ops; - int n_in_progress = object->n_in_progress; - spin_unlock(&cookie->lock); - printk(KERN_ERR "FS-Cache:" - " Cookie '%s' still has %d outstanding reads (%d,%d)\n", - cookie->def->name, - n_reads, n_ops, n_in_progress); - wait_on_bit(&cookie->flags, FSCACHE_COOKIE_WAITING_ON_READS, - fscache_wait_bit, TASK_UNINTERRUPTIBLE); - printk("Wait finished\n"); - goto try_again; - } - - /* detach each cache object from the object cookie */ - spin_lock(&object->lock); - hlist_del_init(&object->cookie_link); - - cache = object->cache; - object->cookie = NULL; - fscache_raise_event(object, event); - spin_unlock(&object->lock); - - if (atomic_dec_and_test(&cookie->usage)) - /* the cookie refcount shouldn't be reduced to 0 yet */ - BUG(); + hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) { + fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL); } + spin_unlock(&cookie->lock); - /* detach pointers back to the netfs */ + /* Wait for cessation of activity requiring access to the netfs (when + * n_active reaches 0). + */ + if (!atomic_dec_and_test(&cookie->n_active)) + wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t, + TASK_UNINTERRUPTIBLE); + + /* Clear pointers back to the netfs */ cookie->netfs_data = NULL; cookie->def = NULL; - - spin_unlock(&cookie->lock); + BUG_ON(cookie->stores.rnode); if (cookie->parent) { ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0); @@ -553,7 +524,7 @@ try_again: atomic_dec(&cookie->parent->n_children); } - /* finally dispose of the cookie */ + /* Dispose of the netfs's link to the cookie */ ASSERTCMP(atomic_read(&cookie->usage), >, 0); fscache_cookie_put(cookie); diff --git a/fs/fscache/fsdef.c b/fs/fscache/fsdef.c index f5b4baee7352..10a2ade0bdf8 100644 --- a/fs/fscache/fsdef.c +++ b/fs/fscache/fsdef.c @@ -55,6 +55,7 @@ static struct fscache_cookie_def fscache_fsdef_index_def = { struct fscache_cookie fscache_fsdef_index = { .usage = ATOMIC_INIT(1), + .n_active = ATOMIC_INIT(1), .lock = __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock), .backing_objects = HLIST_HEAD_INIT, .def = &fscache_fsdef_index_def, diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index ee38fef4be51..12d505bedb5c 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h @@ -93,14 +93,11 @@ static inline bool fscache_object_congested(void) extern int fscache_wait_bit(void *); extern int fscache_wait_bit_interruptible(void *); +extern int fscache_wait_atomic_t(atomic_t *); /* * object.c */ -extern const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5]; - -extern void fscache_withdrawing_object(struct fscache_cache *, - struct fscache_object *); extern void fscache_enqueue_object(struct fscache_object *); /* @@ -110,8 +107,10 @@ extern void fscache_enqueue_object(struct fscache_object *); extern const struct file_operations fscache_objlist_fops; extern void 
fscache_objlist_add(struct fscache_object *); +extern void fscache_objlist_remove(struct fscache_object *); #else #define fscache_objlist_add(object) do {} while(0) +#define fscache_objlist_remove(object) do {} while(0) #endif /* @@ -291,6 +290,10 @@ static inline void fscache_raise_event(struct fscache_object *object, unsigned event) { BUG_ON(event >= NR_FSCACHE_OBJECT_EVENTS); +#if 0 + printk("*** fscache_raise_event(OBJ%d{%lx},%x)\n", + object->debug_id, object->event_mask, (1 << event)); +#endif if (!test_and_set_bit(event, &object->events) && test_bit(event, &object->event_mask)) fscache_enqueue_object(object); diff --git a/fs/fscache/main.c b/fs/fscache/main.c index f9d856773f79..7c27907e650c 100644 --- a/fs/fscache/main.c +++ b/fs/fscache/main.c @@ -205,7 +205,6 @@ int fscache_wait_bit(void *flags) schedule(); return 0; } -EXPORT_SYMBOL(fscache_wait_bit); /* * wait_on_bit() sleep function for interruptible waiting @@ -215,4 +214,12 @@ int fscache_wait_bit_interruptible(void *flags) schedule(); return signal_pending(current); } -EXPORT_SYMBOL(fscache_wait_bit_interruptible); + +/* + * wait_on_atomic_t() sleep function for uninterruptible waiting + */ +int fscache_wait_atomic_t(atomic_t *p) +{ + schedule(); + return 0; +} diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c index e028b8eb1c40..b1bb6117473a 100644 --- a/fs/fscache/netfs.c +++ b/fs/fscache/netfs.c @@ -40,6 +40,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs) /* initialise the primary index cookie */ atomic_set(&netfs->primary_index->usage, 1); atomic_set(&netfs->primary_index->n_children, 0); + atomic_set(&netfs->primary_index->n_active, 1); netfs->primary_index->def = &fscache_fsdef_netfs_def; netfs->primary_index->parent = &fscache_fsdef_index; diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c index f27c89d17885..e1959efad64f 100644 --- a/fs/fscache/object-list.c +++ b/fs/fscache/object-list.c @@ -70,13 +70,10 @@ void fscache_objlist_add(struct fscache_object *obj) write_unlock(&fscache_object_list_lock); } -/** - * fscache_object_destroy - Note that a cache object is about to be destroyed - * @object: The object to be destroyed - * - * Note the imminent destruction and deallocation of a cache object record. +/* + * Remove an object from the object list. 
*/ -void fscache_object_destroy(struct fscache_object *obj) +void fscache_objlist_remove(struct fscache_object *obj) { write_lock(&fscache_object_list_lock); @@ -85,7 +82,6 @@ void fscache_object_destroy(struct fscache_object *obj) write_unlock(&fscache_object_list_lock); } -EXPORT_SYMBOL(fscache_object_destroy); /* * find the object in the tree on or after the specified index @@ -166,15 +162,14 @@ static int fscache_objlist_show(struct seq_file *m, void *v) { struct fscache_objlist_data *data = m->private; struct fscache_object *obj = v; + struct fscache_cookie *cookie; unsigned long config = data->config; - uint16_t keylen, auxlen; char _type[3], *type; - bool no_cookie; u8 *buf = data->buf, *p; if ((unsigned long) v == 1) { seq_puts(m, "OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS" - " EM EV F S" + " EM EV FL S" " | NETFS_COOKIE_DEF TY FL NETFS_DATA"); if (config & (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX)) @@ -193,7 +188,7 @@ static int fscache_objlist_show(struct seq_file *m, void *v) if ((unsigned long) v == 2) { seq_puts(m, "======== ======== ==== ===== === === === == =====" - " == == = =" + " == == == =" " | ================ == == ================"); if (config & (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX)) @@ -216,10 +211,11 @@ static int fscache_objlist_show(struct seq_file *m, void *v) } \ } while(0) + cookie = obj->cookie; if (~config) { - FILTER(obj->cookie, + FILTER(cookie->def, COOKIE, NOCOOKIE); - FILTER(obj->state != FSCACHE_OBJECT_ACTIVE || + FILTER(fscache_object_is_active(obj) || obj->n_ops != 0 || obj->n_obj_ops != 0 || obj->flags || @@ -235,10 +231,10 @@ static int fscache_objlist_show(struct seq_file *m, void *v) } seq_printf(m, - "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1x | ", + "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %2lx %1x | ", obj->debug_id, obj->parent ? 
obj->parent->debug_id : -1, - fscache_object_states_short[obj->state], + obj->state->short_name, obj->n_children, obj->n_ops, obj->n_obj_ops, @@ -250,48 +246,40 @@ static int fscache_objlist_show(struct seq_file *m, void *v) obj->flags, work_busy(&obj->work)); - no_cookie = true; - keylen = auxlen = 0; - if (obj->cookie) { - spin_lock(&obj->lock); - if (obj->cookie) { - switch (obj->cookie->def->type) { - case 0: - type = "IX"; - break; - case 1: - type = "DT"; - break; - default: - sprintf(_type, "%02u", - obj->cookie->def->type); - type = _type; - break; - } + if (fscache_use_cookie(obj)) { + uint16_t keylen = 0, auxlen = 0; - seq_printf(m, "%-16s %s %2lx %16p", - obj->cookie->def->name, - type, - obj->cookie->flags, - obj->cookie->netfs_data); - - if (obj->cookie->def->get_key && - config & FSCACHE_OBJLIST_CONFIG_KEY) - keylen = obj->cookie->def->get_key( - obj->cookie->netfs_data, - buf, 400); - - if (obj->cookie->def->get_aux && - config & FSCACHE_OBJLIST_CONFIG_AUX) - auxlen = obj->cookie->def->get_aux( - obj->cookie->netfs_data, - buf + keylen, 512 - keylen); - - no_cookie = false; + switch (cookie->def->type) { + case 0: + type = "IX"; + break; + case 1: + type = "DT"; + break; + default: + sprintf(_type, "%02u", cookie->def->type); + type = _type; + break; } - spin_unlock(&obj->lock); - if (!no_cookie && (keylen > 0 || auxlen > 0)) { + seq_printf(m, "%-16s %s %2lx %16p", + cookie->def->name, + type, + cookie->flags, + cookie->netfs_data); + + if (cookie->def->get_key && + config & FSCACHE_OBJLIST_CONFIG_KEY) + keylen = cookie->def->get_key(cookie->netfs_data, + buf, 400); + + if (cookie->def->get_aux && + config & FSCACHE_OBJLIST_CONFIG_AUX) + auxlen = cookie->def->get_aux(cookie->netfs_data, + buf + keylen, 512 - keylen); + fscache_unuse_cookie(obj); + + if (keylen > 0 || auxlen > 0) { seq_printf(m, " "); for (p = buf; keylen > 0; keylen--) seq_printf(m, "%02x", *p++); @@ -302,12 +290,11 @@ static int fscache_objlist_show(struct seq_file *m, void *v) seq_printf(m, "%02x", *p++); } } - } - if (no_cookie) - seq_printf(m, "<no_cookie>\n"); - else seq_printf(m, "\n"); + } else { + seq_printf(m, "<no_netfs>\n"); + } return 0; } diff --git a/fs/fscache/object.c b/fs/fscache/object.c index 50d41c180211..86d75a60b20c 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c @@ -15,52 +15,131 @@ #define FSCACHE_DEBUG_LEVEL COOKIE #include <linux/module.h> #include <linux/slab.h> +#include <linux/prefetch.h> #include "internal.h" -const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = { - [FSCACHE_OBJECT_INIT] = "OBJECT_INIT", - [FSCACHE_OBJECT_LOOKING_UP] = "OBJECT_LOOKING_UP", - [FSCACHE_OBJECT_CREATING] = "OBJECT_CREATING", - [FSCACHE_OBJECT_AVAILABLE] = "OBJECT_AVAILABLE", - [FSCACHE_OBJECT_ACTIVE] = "OBJECT_ACTIVE", - [FSCACHE_OBJECT_INVALIDATING] = "OBJECT_INVALIDATING", - [FSCACHE_OBJECT_UPDATING] = "OBJECT_UPDATING", - [FSCACHE_OBJECT_DYING] = "OBJECT_DYING", - [FSCACHE_OBJECT_LC_DYING] = "OBJECT_LC_DYING", - [FSCACHE_OBJECT_ABORT_INIT] = "OBJECT_ABORT_INIT", - [FSCACHE_OBJECT_RELEASING] = "OBJECT_RELEASING", - [FSCACHE_OBJECT_RECYCLING] = "OBJECT_RECYCLING", - [FSCACHE_OBJECT_WITHDRAWING] = "OBJECT_WITHDRAWING", - [FSCACHE_OBJECT_DEAD] = "OBJECT_DEAD", +static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int); +static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int); +static const struct fscache_state *fscache_drop_object(struct fscache_object *, int); +static const struct fscache_state 
*fscache_initialise_object(struct fscache_object *, int); +static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int); +static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int); +static const struct fscache_state *fscache_kill_object(struct fscache_object *, int); +static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int); +static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int); +static const struct fscache_state *fscache_object_available(struct fscache_object *, int); +static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int); +static const struct fscache_state *fscache_update_object(struct fscache_object *, int); + +#define __STATE_NAME(n) fscache_osm_##n +#define STATE(n) (&__STATE_NAME(n)) + +/* + * Define a work state. Work states are execution states. No event processing + * is performed by them. The function attached to a work state returns a + * pointer indicating the next state to which the state machine should + * transition. Returning NO_TRANSIT repeats the current state, but goes back + * to the scheduler first. + */ +#define WORK_STATE(n, sn, f) \ + const struct fscache_state __STATE_NAME(n) = { \ + .name = #n, \ + .short_name = sn, \ + .work = f \ + } + +/* + * Returns from work states. + */ +#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); }) + +#define NO_TRANSIT ((struct fscache_state *)NULL) + +/* + * Define a wait state. Wait states are event processing states. No execution + * is performed by them. Wait states are just tables of "if event X occurs, + * clear it and transition to state Y". The dispatcher returns to the + * scheduler if none of the events in which the wait state has an interest are + * currently pending. + */ +#define WAIT_STATE(n, sn, ...) \ + const struct fscache_state __STATE_NAME(n) = { \ + .name = #n, \ + .short_name = sn, \ + .work = NULL, \ + .transitions = { __VA_ARGS__, { 0, NULL } } \ + } + +#define TRANSIT_TO(state, emask) \ + { .events = (emask), .transit_to = STATE(state) } + +/* + * The object state machine. 
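The WORK_STATE()/WAIT_STATE()/TRANSIT_TO() macros above are easier to follow when expanded. A compilable userspace reduction of the same table-driven pattern, with simplified field types and illustrative names (a sketch, not the kernel's definitions):

/* Userspace reduction of the table-driven state pattern; simplified types. */
#include <stdio.h>

struct state;
struct transition {
	unsigned long events;			/* event bits that trigger this row */
	const struct state *transit_to;
};
struct state {
	const char *name;
	const char *short_name;
	const struct state *(*work)(int event);	/* NULL for wait states */
	struct transition transitions[4];	/* { 0, NULL } terminated */
};

#define STATE(n) (&state_##n)
#define WORK_STATE(n, sn, f) \
	const struct state state_##n = { #n, sn, f, { { 0, NULL } } }
#define WAIT_STATE(n, sn, ...) \
	const struct state state_##n = { #n, sn, NULL, { __VA_ARGS__, { 0, NULL } } }
#define TRANSIT_TO(s, mask) { (mask), STATE(s) }

static const struct state *init_object(int event)
{
	printf("initialising (event %d)\n", event);
	return NULL;		/* NO_TRANSIT: stay here, go back to the scheduler */
}

static WORK_STATE(INIT_OBJECT, "INIT", init_object);
static WAIT_STATE(WAIT_FOR_INIT, "?INI", TRANSIT_TO(INIT_OBJECT, 1UL << 0));

int main(void)
{
	const struct state *s = STATE(WAIT_FOR_INIT);

	printf("%s wakes to %s on mask %#lx\n", s->name,
	       s->transitions[0].transit_to->name, s->transitions[0].events);
	return 0;
}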
+ */ +static WORK_STATE(INIT_OBJECT, "INIT", fscache_initialise_object); +static WORK_STATE(PARENT_READY, "PRDY", fscache_parent_ready); +static WORK_STATE(ABORT_INIT, "ABRT", fscache_abort_initialisation); +static WORK_STATE(LOOK_UP_OBJECT, "LOOK", fscache_look_up_object); +static WORK_STATE(CREATE_OBJECT, "CRTO", fscache_look_up_object); +static WORK_STATE(OBJECT_AVAILABLE, "AVBL", fscache_object_available); +static WORK_STATE(JUMPSTART_DEPS, "JUMP", fscache_jumpstart_dependents); + +static WORK_STATE(INVALIDATE_OBJECT, "INVL", fscache_invalidate_object); +static WORK_STATE(UPDATE_OBJECT, "UPDT", fscache_update_object); + +static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure); +static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object); +static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents); +static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object); +static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL); + +static WAIT_STATE(WAIT_FOR_INIT, "?INI", + TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD)); + +static WAIT_STATE(WAIT_FOR_PARENT, "?PRN", + TRANSIT_TO(PARENT_READY, 1 << FSCACHE_OBJECT_EV_PARENT_READY)); + +static WAIT_STATE(WAIT_FOR_CMD, "?CMD", + TRANSIT_TO(INVALIDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_INVALIDATE), + TRANSIT_TO(UPDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_UPDATE), + TRANSIT_TO(JUMPSTART_DEPS, 1 << FSCACHE_OBJECT_EV_NEW_CHILD)); + +static WAIT_STATE(WAIT_FOR_CLEARANCE, "?CLR", + TRANSIT_TO(KILL_OBJECT, 1 << FSCACHE_OBJECT_EV_CLEARED)); + +/* + * Out-of-band event transition tables. These are for handling unexpected + * events, such as an I/O error. If an OOB event occurs, the state machine + * clears and disables the event and forces a transition to the nominated work + * state (a currently executing work state will complete first). + * + * In such a situation, object->state remembers the state the machine should + * have been in/gone to and returning NO_TRANSIT returns to that.
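The dispatcher below picks the highest-numbered pending event with fls(events & t->events) - 1 and clears only that bit. A userspace sketch of that selection, assuming GCC or Clang for __builtin_clzl():

/* Pick the highest-numbered pending event from a mask, fls()-style. */
#include <stdio.h>

static int fls_ulong(unsigned long x)
{
	return x ? (int)(8 * sizeof(x)) - __builtin_clzl(x) : 0;
}

int main(void)
{
	unsigned long events = (1UL << 3) | (1UL << 1);	/* pending events */
	unsigned long mask   = (1UL << 3) | (1UL << 2);	/* row's trigger bits */
	int event = fls_ulong(events & mask) - 1;

	printf("event %d\n", event);	/* 3: the highest matching bit wins */
	events &= ~(1UL << event);	/* clear_bit() analogue */
	return 0;
}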
+ */ +static const struct fscache_transition fscache_osm_init_oob[] = { + TRANSIT_TO(ABORT_INIT, + (1 << FSCACHE_OBJECT_EV_ERROR) | + (1 << FSCACHE_OBJECT_EV_KILL)), + { 0, NULL } +}; + +static const struct fscache_transition fscache_osm_lookup_oob[] = { + TRANSIT_TO(LOOKUP_FAILURE, + (1 << FSCACHE_OBJECT_EV_ERROR) | + (1 << FSCACHE_OBJECT_EV_KILL)), + { 0, NULL } }; -EXPORT_SYMBOL(fscache_object_states); - -const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = { - [FSCACHE_OBJECT_INIT] = "INIT", - [FSCACHE_OBJECT_LOOKING_UP] = "LOOK", - [FSCACHE_OBJECT_CREATING] = "CRTN", - [FSCACHE_OBJECT_AVAILABLE] = "AVBL", - [FSCACHE_OBJECT_ACTIVE] = "ACTV", - [FSCACHE_OBJECT_INVALIDATING] = "INVL", - [FSCACHE_OBJECT_UPDATING] = "UPDT", - [FSCACHE_OBJECT_DYING] = "DYNG", - [FSCACHE_OBJECT_LC_DYING] = "LCDY", - [FSCACHE_OBJECT_ABORT_INIT] = "ABTI", - [FSCACHE_OBJECT_RELEASING] = "RELS", - [FSCACHE_OBJECT_RECYCLING] = "RCYC", - [FSCACHE_OBJECT_WITHDRAWING] = "WTHD", - [FSCACHE_OBJECT_DEAD] = "DEAD", + +static const struct fscache_transition fscache_osm_run_oob[] = { + TRANSIT_TO(KILL_OBJECT, + (1 << FSCACHE_OBJECT_EV_ERROR) | + (1 << FSCACHE_OBJECT_EV_KILL)), + { 0, NULL } }; static int fscache_get_object(struct fscache_object *); static void fscache_put_object(struct fscache_object *); -static void fscache_initialise_object(struct fscache_object *); -static void fscache_lookup_object(struct fscache_object *); -static void fscache_object_available(struct fscache_object *); -static void fscache_invalidate_object(struct fscache_object *); -static void fscache_release_object(struct fscache_object *); -static void fscache_withdraw_object(struct fscache_object *); -static void fscache_enqueue_dependents(struct fscache_object *); +static bool fscache_enqueue_dependents(struct fscache_object *, int); static void fscache_dequeue_object(struct fscache_object *); /* @@ -75,295 +154,116 @@ static inline void fscache_done_parent_op(struct fscache_object *object) object->debug_id, parent->debug_id, parent->n_ops); spin_lock_nested(&parent->lock, 1); - parent->n_ops--; parent->n_obj_ops--; + parent->n_ops--; if (parent->n_ops == 0) fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED); spin_unlock(&parent->lock); } /* - * Notify netfs of invalidation completion. + * Object state machine dispatcher. 
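As a rough model of the dispatcher that follows: work states run back to back until one returns NO_TRANSIT or the machine lands in a wait state, whose transition table is then matched against the pending events. A self-contained sketch, using a single-row wait table for brevity (not the kernel's structures):

/* Minimal model of the dispatch loop: run work states until NO_TRANSIT,
 * consume events in wait states, sleep when nothing is pending. */
#include <stdio.h>

struct st {
	const char *name;
	const struct st *(*work)(void);	/* NULL for wait states */
	unsigned long wake_mask;	/* events the wait state handles */
	const struct st *wake_to;	/* single-row transition table */
};

static const struct st st_done;

static const struct st *do_lookup(void)
{
	printf("looking up\n");
	return &st_done;
}

static const struct st *do_done(void)
{
	return NULL;			/* NO_TRANSIT: requeue for later */
}

static const struct st st_done   = { "DONE", do_done, 0, NULL };
static const struct st st_lookup = { "LOOK", do_lookup, 0, NULL };
static const struct st st_wait   = { "?INI", NULL, 1UL << 0, &st_lookup };

static void dispatch(const struct st *s, unsigned long *events)
{
	for (;;) {
		if (!s->work) {			/* wait state */
			if (!(*events & s->wake_mask))
				return;		/* nothing pending: sleep */
			*events &= ~s->wake_mask;
			s = s->wake_to;
		}
		printf("exec %s\n", s->name);
		s = s->work();
		if (!s)
			return;
	}
}

int main(void)
{
	unsigned long events = 1UL << 0;	/* EV_NEW_CHILD analogue */

	dispatch(&st_wait, &events);
	return 0;
}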
*/ -static inline void fscache_invalidation_complete(struct fscache_cookie *cookie) +static void fscache_object_sm_dispatcher(struct fscache_object *object) { - if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) - wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING); -} - -/* - * process events that have been sent to an object's state machine - * - initiates parent lookup - * - does object lookup - * - does object creation - * - does object recycling and retirement - * - does object withdrawal - */ -static void fscache_object_state_machine(struct fscache_object *object) -{ - enum fscache_object_state new_state; - struct fscache_cookie *cookie; - int event; + const struct fscache_transition *t; + const struct fscache_state *state, *new_state; + unsigned long events, event_mask; + int event = -1; ASSERT(object != NULL); _enter("{OBJ%x,%s,%lx}", - object->debug_id, fscache_object_states[object->state], - object->events); - - switch (object->state) { - /* wait for the parent object to become ready */ - case FSCACHE_OBJECT_INIT: - object->event_mask = - FSCACHE_OBJECT_EVENTS_MASK & - ~(1 << FSCACHE_OBJECT_EV_CLEARED); - fscache_initialise_object(object); - goto done; - - /* look up the object metadata on disk */ - case FSCACHE_OBJECT_LOOKING_UP: - fscache_lookup_object(object); - goto lookup_transit; - - /* create the object metadata on disk */ - case FSCACHE_OBJECT_CREATING: - fscache_lookup_object(object); - goto lookup_transit; - - /* handle an object becoming available; start pending - * operations and queue dependent operations for processing */ - case FSCACHE_OBJECT_AVAILABLE: - fscache_object_available(object); - goto active_transit; - - /* normal running state */ - case FSCACHE_OBJECT_ACTIVE: - goto active_transit; - - /* Invalidate an object on disk */ - case FSCACHE_OBJECT_INVALIDATING: - clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events); - fscache_stat(&fscache_n_invalidates_run); - fscache_stat(&fscache_n_cop_invalidate_object); - fscache_invalidate_object(object); - fscache_stat_d(&fscache_n_cop_invalidate_object); - fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE); - goto active_transit; - - /* update the object metadata on disk */ - case FSCACHE_OBJECT_UPDATING: - clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events); - fscache_stat(&fscache_n_updates_run); - fscache_stat(&fscache_n_cop_update_object); - object->cache->ops->update_object(object); - fscache_stat_d(&fscache_n_cop_update_object); - goto active_transit; - - /* handle an object dying during lookup or creation */ - case FSCACHE_OBJECT_LC_DYING: - object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE); - fscache_stat(&fscache_n_cop_lookup_complete); - object->cache->ops->lookup_complete(object); - fscache_stat_d(&fscache_n_cop_lookup_complete); - - spin_lock(&object->lock); - object->state = FSCACHE_OBJECT_DYING; - cookie = object->cookie; - if (cookie) { - if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, - &cookie->flags)) - wake_up_bit(&cookie->flags, - FSCACHE_COOKIE_LOOKING_UP); - if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, - &cookie->flags)) - wake_up_bit(&cookie->flags, - FSCACHE_COOKIE_CREATING); + object->debug_id, object->state->name, object->events); + + event_mask = object->event_mask; +restart: + object->event_mask = 0; /* Mask normal event handling */ + state = object->state; +restart_masked: + events = object->events; + + /* Handle any out-of-band events (typically an error) */ + if (events & object->oob_event_mask) { + _debug("{OBJ%x} oob %lx", + object->debug_id, 
events & object->oob_event_mask); + for (t = object->oob_table; t->events; t++) { + if (events & t->events) { + state = t->transit_to; + ASSERT(state->work != NULL); + event = fls(events & t->events) - 1; + __clear_bit(event, &object->oob_event_mask); + clear_bit(event, &object->events); + goto execute_work_state; + } } - spin_unlock(&object->lock); + } - fscache_done_parent_op(object); + /* Wait states are just transition tables */ + if (!state->work) { + if (events & event_mask) { + for (t = state->transitions; t->events; t++) { + if (events & t->events) { + new_state = t->transit_to; + event = fls(events & t->events) - 1; + clear_bit(event, &object->events); + _debug("{OBJ%x} ev %d: %s -> %s", + object->debug_id, event, + state->name, new_state->name); + object->state = state = new_state; + goto execute_work_state; + } + } - /* wait for completion of all active operations on this object - * and the death of all child objects of this object */ - case FSCACHE_OBJECT_DYING: - dying: - clear_bit(FSCACHE_OBJECT_EV_CLEARED, &object->events); - spin_lock(&object->lock); - _debug("dying OBJ%x {%d,%d}", - object->debug_id, object->n_ops, object->n_children); - if (object->n_ops == 0 && object->n_children == 0) { - object->event_mask &= - ~(1 << FSCACHE_OBJECT_EV_CLEARED); - object->event_mask |= - (1 << FSCACHE_OBJECT_EV_WITHDRAW) | - (1 << FSCACHE_OBJECT_EV_RETIRE) | - (1 << FSCACHE_OBJECT_EV_RELEASE) | - (1 << FSCACHE_OBJECT_EV_ERROR); - } else { - object->event_mask &= - ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) | - (1 << FSCACHE_OBJECT_EV_RETIRE) | - (1 << FSCACHE_OBJECT_EV_RELEASE) | - (1 << FSCACHE_OBJECT_EV_ERROR)); - object->event_mask |= - 1 << FSCACHE_OBJECT_EV_CLEARED; + /* The event mask didn't include all the tabled bits */ + BUG(); } - spin_unlock(&object->lock); - fscache_enqueue_dependents(object); - fscache_start_operations(object); - goto terminal_transit; - - /* handle an abort during initialisation */ - case FSCACHE_OBJECT_ABORT_INIT: - _debug("handle abort init %lx", object->events); - object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE); - - spin_lock(&object->lock); - fscache_dequeue_object(object); - - object->state = FSCACHE_OBJECT_DYING; - if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, - &object->cookie->flags)) - wake_up_bit(&object->cookie->flags, - FSCACHE_COOKIE_CREATING); - spin_unlock(&object->lock); - goto dying; - - /* handle the netfs releasing an object and possibly marking it - * obsolete too */ - case FSCACHE_OBJECT_RELEASING: - case FSCACHE_OBJECT_RECYCLING: - object->event_mask &= - ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) | - (1 << FSCACHE_OBJECT_EV_RETIRE) | - (1 << FSCACHE_OBJECT_EV_RELEASE) | - (1 << FSCACHE_OBJECT_EV_ERROR)); - fscache_release_object(object); - spin_lock(&object->lock); - object->state = FSCACHE_OBJECT_DEAD; - spin_unlock(&object->lock); - fscache_stat(&fscache_n_object_dead); - goto terminal_transit; - - /* handle the parent cache of this object being withdrawn from - * active service */ - case FSCACHE_OBJECT_WITHDRAWING: - object->event_mask &= - ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) | - (1 << FSCACHE_OBJECT_EV_RETIRE) | - (1 << FSCACHE_OBJECT_EV_RELEASE) | - (1 << FSCACHE_OBJECT_EV_ERROR)); - fscache_withdraw_object(object); - spin_lock(&object->lock); - object->state = FSCACHE_OBJECT_DEAD; - spin_unlock(&object->lock); - fscache_stat(&fscache_n_object_dead); - goto terminal_transit; - - /* complain about the object being woken up once it is - * deceased */ - case FSCACHE_OBJECT_DEAD: - printk(KERN_ERR "FS-Cache:" - " Unexpected event in 
dead state %lx\n", - object->events & object->event_mask); - BUG(); - - default: - printk(KERN_ERR "FS-Cache: Unknown object state %u\n", - object->state); - BUG(); - } - - /* determine the transition from a lookup state */ -lookup_transit: - event = fls(object->events & object->event_mask) - 1; - switch (event) { - case FSCACHE_OBJECT_EV_WITHDRAW: - case FSCACHE_OBJECT_EV_RETIRE: - case FSCACHE_OBJECT_EV_RELEASE: - case FSCACHE_OBJECT_EV_ERROR: - new_state = FSCACHE_OBJECT_LC_DYING; - goto change_state; - case FSCACHE_OBJECT_EV_INVALIDATE: - new_state = FSCACHE_OBJECT_INVALIDATING; - goto change_state; - case FSCACHE_OBJECT_EV_REQUEUE: - goto done; - case -1: - goto done; /* sleep until event */ - default: - goto unsupported_event; + /* Randomly woke up */ + goto unmask_events; } - /* determine the transition from an active state */ -active_transit: - event = fls(object->events & object->event_mask) - 1; - switch (event) { - case FSCACHE_OBJECT_EV_WITHDRAW: - case FSCACHE_OBJECT_EV_RETIRE: - case FSCACHE_OBJECT_EV_RELEASE: - case FSCACHE_OBJECT_EV_ERROR: - new_state = FSCACHE_OBJECT_DYING; - goto change_state; - case FSCACHE_OBJECT_EV_INVALIDATE: - new_state = FSCACHE_OBJECT_INVALIDATING; - goto change_state; - case FSCACHE_OBJECT_EV_UPDATE: - new_state = FSCACHE_OBJECT_UPDATING; - goto change_state; - case -1: - new_state = FSCACHE_OBJECT_ACTIVE; - goto change_state; /* sleep until event */ - default: - goto unsupported_event; - } +execute_work_state: + _debug("{OBJ%x} exec %s", object->debug_id, state->name); - /* determine the transition from a terminal state */ -terminal_transit: - event = fls(object->events & object->event_mask) - 1; - switch (event) { - case FSCACHE_OBJECT_EV_WITHDRAW: - new_state = FSCACHE_OBJECT_WITHDRAWING; - goto change_state; - case FSCACHE_OBJECT_EV_RETIRE: - new_state = FSCACHE_OBJECT_RECYCLING; - goto change_state; - case FSCACHE_OBJECT_EV_RELEASE: - new_state = FSCACHE_OBJECT_RELEASING; - goto change_state; - case FSCACHE_OBJECT_EV_ERROR: - new_state = FSCACHE_OBJECT_WITHDRAWING; - goto change_state; - case FSCACHE_OBJECT_EV_CLEARED: - new_state = FSCACHE_OBJECT_DYING; - goto change_state; - case -1: - goto done; /* sleep until event */ - default: - goto unsupported_event; + new_state = state->work(object, event); + event = -1; + if (new_state == NO_TRANSIT) { + _debug("{OBJ%x} %s notrans", object->debug_id, state->name); + fscache_enqueue_object(object); + event_mask = object->oob_event_mask; + goto unmask_events; } -change_state: - spin_lock(&object->lock); - object->state = new_state; - spin_unlock(&object->lock); + _debug("{OBJ%x} %s -> %s", + object->debug_id, state->name, new_state->name); + object->state = state = new_state; -done: - _leave(" [->%s]", fscache_object_states[object->state]); - return; + if (state->work) { + if (unlikely(state->work == ((void *)2UL))) { + _leave(" [dead]"); + return; + } + goto restart_masked; + } -unsupported_event: - printk(KERN_ERR "FS-Cache:" - " Unsupported event %d [%lx/%lx] in state %s\n", - event, object->events, object->event_mask, - fscache_object_states[object->state]); - BUG(); + /* Transited to wait state */ + event_mask = object->oob_event_mask; + for (t = state->transitions; t->events; t++) + event_mask |= t->events; + +unmask_events: + object->event_mask = event_mask; + smp_mb(); + events = object->events; + if (events & event_mask) + goto restart; + _leave(" [msk %lx]", event_mask); } /* * execute an object */ -void fscache_object_work_func(struct work_struct *work) +static void 
fscache_object_work_func(struct work_struct *work) { struct fscache_object *object = container_of(work, struct fscache_object, work); @@ -372,14 +272,70 @@ void fscache_object_work_func(struct work_struct *work) _enter("{OBJ%x}", object->debug_id); start = jiffies; - fscache_object_state_machine(object); + fscache_object_sm_dispatcher(object); fscache_hist(fscache_objs_histogram, start); - if (object->events & object->event_mask) - fscache_enqueue_object(object); - clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); fscache_put_object(object); } -EXPORT_SYMBOL(fscache_object_work_func); + +/** + * fscache_object_init - Initialise a cache object description + * @object: Object description + * @cookie: Cookie object will be attached to + * @cache: Cache in which backing object will be found + * + * Initialise a cache object description to its basic values. + * + * See Documentation/filesystems/caching/backend-api.txt for a complete + * description. + */ +void fscache_object_init(struct fscache_object *object, + struct fscache_cookie *cookie, + struct fscache_cache *cache) +{ + const struct fscache_transition *t; + + atomic_inc(&cache->object_count); + + object->state = STATE(WAIT_FOR_INIT); + object->oob_table = fscache_osm_init_oob; + object->flags = 1 << FSCACHE_OBJECT_IS_LIVE; + spin_lock_init(&object->lock); + INIT_LIST_HEAD(&object->cache_link); + INIT_HLIST_NODE(&object->cookie_link); + INIT_WORK(&object->work, fscache_object_work_func); + INIT_LIST_HEAD(&object->dependents); + INIT_LIST_HEAD(&object->dep_link); + INIT_LIST_HEAD(&object->pending_ops); + object->n_children = 0; + object->n_ops = object->n_in_progress = object->n_exclusive = 0; + object->events = 0; + object->store_limit = 0; + object->store_limit_l = 0; + object->cache = cache; + object->cookie = cookie; + object->parent = NULL; + + object->oob_event_mask = 0; + for (t = object->oob_table; t->events; t++) + object->oob_event_mask |= t->events; + object->event_mask = object->oob_event_mask; + for (t = object->state->transitions; t->events; t++) + object->event_mask |= t->events; +} +EXPORT_SYMBOL(fscache_object_init); + +/* + * Abort object initialisation before we start it. 
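fscache_object_init() above builds the initial event_mask by OR-ing the trigger bits of every row in the OOB table and in the initial state's transition table. That computation in isolation, with hypothetical table values:

/* OR together the trigger bits of a { 0, ... }-terminated transition table. */
#include <stdio.h>

struct transition {
	unsigned long events;
	const void *transit_to;
};

static unsigned long table_mask(const struct transition *t)
{
	unsigned long mask = 0;

	for (; t->events; t++)
		mask |= t->events;
	return mask;
}

int main(void)
{
	static const struct transition table[] = {
		{ 1UL << 2, NULL }, { 1UL << 5, NULL }, { 0, NULL },
	};

	printf("event mask %#lx\n", table_mask(table));	/* 0x24 */
	return 0;
}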
+ */ +static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object, + int event) +{ + _enter("{OBJ%x},%d", object->debug_id, event); + + object->oob_event_mask = 0; + fscache_dequeue_object(object); + return transit_to(KILL_OBJECT); +} /* * initialise an object @@ -387,130 +343,136 @@ EXPORT_SYMBOL(fscache_object_work_func); * immediately to do a creation * - we may need to start the process of creating a parent and we need to wait * for the parent's lookup and creation to complete if it's not there yet - * - an object's cookie is pinned until we clear FSCACHE_COOKIE_CREATING on the - * leaf-most cookies of the object and all its children */ -static void fscache_initialise_object(struct fscache_object *object) +static const struct fscache_state *fscache_initialise_object(struct fscache_object *object, + int event) { struct fscache_object *parent; + bool success; - _enter(""); - ASSERT(object->cookie != NULL); - ASSERT(object->cookie->parent != NULL); - - if (object->events & ((1 << FSCACHE_OBJECT_EV_ERROR) | - (1 << FSCACHE_OBJECT_EV_RELEASE) | - (1 << FSCACHE_OBJECT_EV_RETIRE) | - (1 << FSCACHE_OBJECT_EV_WITHDRAW))) { - _debug("abort init %lx", object->events); - spin_lock(&object->lock); - object->state = FSCACHE_OBJECT_ABORT_INIT; - spin_unlock(&object->lock); - return; - } + _enter("{OBJ%x},%d", object->debug_id, event); - spin_lock(&object->cookie->lock); - spin_lock_nested(&object->cookie->parent->lock, 1); + ASSERT(list_empty(&object->dep_link)); parent = object->parent; if (!parent) { - _debug("no parent"); - set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events); - } else { - spin_lock(&object->lock); - spin_lock_nested(&parent->lock, 1); - _debug("parent %s", fscache_object_states[parent->state]); - - if (parent->state >= FSCACHE_OBJECT_DYING) { - _debug("bad parent"); - set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events); - } else if (parent->state < FSCACHE_OBJECT_AVAILABLE) { - _debug("wait"); - - /* we may get woken up in this state by child objects - * binding on to us, so we need to make sure we don't - * add ourself to the list multiple times */ - if (list_empty(&object->dep_link)) { - fscache_stat(&fscache_n_cop_grab_object); - object->cache->ops->grab_object(object); - fscache_stat_d(&fscache_n_cop_grab_object); - list_add(&object->dep_link, - &parent->dependents); - - /* fscache_acquire_non_index_cookie() uses this - * to wake the chain up */ - if (parent->state == FSCACHE_OBJECT_INIT) - fscache_enqueue_object(parent); - } - } else { - _debug("go"); - parent->n_ops++; - parent->n_obj_ops++; - object->lookup_jif = jiffies; - object->state = FSCACHE_OBJECT_LOOKING_UP; - set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); - } + _leave(" [no parent]"); + return transit_to(DROP_OBJECT); + } - spin_unlock(&parent->lock); - spin_unlock(&object->lock); + _debug("parent: %s of:%lx", parent->state->name, parent->flags); + + if (fscache_object_is_dying(parent)) { + _leave(" [bad parent]"); + return transit_to(DROP_OBJECT); } - spin_unlock(&object->cookie->parent->lock); - spin_unlock(&object->cookie->lock); + if (fscache_object_is_available(parent)) { + _leave(" [ready]"); + return transit_to(PARENT_READY); + } + + _debug("wait"); + + spin_lock(&parent->lock); + fscache_stat(&fscache_n_cop_grab_object); + success = false; + if (fscache_object_is_live(parent) && + object->cache->ops->grab_object(object)) { + list_add(&object->dep_link, &parent->dependents); + success = true; + } + fscache_stat_d(&fscache_n_cop_grab_object); + 
spin_unlock(&parent->lock); + if (!success) { + _leave(" [grab failed]"); + return transit_to(DROP_OBJECT); + } + + /* fscache_acquire_non_index_cookie() uses this + * to wake the chain up */ + fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD); + _leave(" [wait]"); + return transit_to(WAIT_FOR_PARENT); +} + +/* + * Once the parent object is ready, we should kick off our lookup op. + */ +static const struct fscache_state *fscache_parent_ready(struct fscache_object *object, + int event) +{ + struct fscache_object *parent = object->parent; + + _enter("{OBJ%x},%d", object->debug_id, event); + + ASSERT(parent != NULL); + + spin_lock(&parent->lock); + parent->n_ops++; + parent->n_obj_ops++; + object->lookup_jif = jiffies; + spin_unlock(&parent->lock); + _leave(""); + return transit_to(LOOK_UP_OBJECT); } /* * look an object up in the cache from which it was allocated * - we hold an "access lock" on the parent object, so the parent object cannot * be withdrawn by either party till we've finished - * - an object's cookie is pinned until we clear FSCACHE_COOKIE_CREATING on the - * leaf-most cookies of the object and all its children */ -static void fscache_lookup_object(struct fscache_object *object) +static const struct fscache_state *fscache_look_up_object(struct fscache_object *object, + int event) { struct fscache_cookie *cookie = object->cookie; - struct fscache_object *parent; + struct fscache_object *parent = object->parent; int ret; - _enter(""); + _enter("{OBJ%x},%d", object->debug_id, event); + + object->oob_table = fscache_osm_lookup_oob; - parent = object->parent; ASSERT(parent != NULL); ASSERTCMP(parent->n_ops, >, 0); ASSERTCMP(parent->n_obj_ops, >, 0); /* make sure the parent is still available */ - ASSERTCMP(parent->state, >=, FSCACHE_OBJECT_AVAILABLE); - - if (parent->state >= FSCACHE_OBJECT_DYING || - test_bit(FSCACHE_IOERROR, &object->cache->flags)) { - _debug("unavailable"); - set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events); - _leave(""); - return; + ASSERT(fscache_object_is_available(parent)); + + if (fscache_object_is_dying(parent) || + test_bit(FSCACHE_IOERROR, &object->cache->flags) || + !fscache_use_cookie(object)) { + _leave(" [unavailable]"); + return transit_to(LOOKUP_FAILURE); } - _debug("LOOKUP \"%s/%s\" in \"%s\"", - parent->cookie->def->name, cookie->def->name, - object->cache->tag->name); + _debug("LOOKUP \"%s\" in \"%s\"", + cookie->def->name, object->cache->tag->name); fscache_stat(&fscache_n_object_lookups); fscache_stat(&fscache_n_cop_lookup_object); ret = object->cache->ops->lookup_object(object); fscache_stat_d(&fscache_n_cop_lookup_object); - if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events)) - set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags); + fscache_unuse_cookie(object); if (ret == -ETIMEDOUT) { /* probably stuck behind another object, so move this one to * the back of the queue */ fscache_stat(&fscache_n_object_lookups_timed_out); - set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); + _leave(" [timeout]"); + return NO_TRANSIT; } - _leave(""); + if (ret < 0) { + _leave(" [error]"); + return transit_to(LOOKUP_FAILURE); + } + + _leave(" [ok]"); + return transit_to(OBJECT_AVAILABLE); } /** @@ -524,32 +486,20 @@ void fscache_object_lookup_negative(struct fscache_object *object) { struct fscache_cookie *cookie = object->cookie; - _enter("{OBJ%x,%s}", - object->debug_id, fscache_object_states[object->state]); + _enter("{OBJ%x,%s}", object->debug_id, object->state->name); - spin_lock(&object->lock); - if (object->state == 
FSCACHE_OBJECT_LOOKING_UP) { + if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) { fscache_stat(&fscache_n_object_lookups_negative); - /* transit here to allow write requests to begin stacking up - * and read requests to begin returning ENODATA */ - object->state = FSCACHE_OBJECT_CREATING; - spin_unlock(&object->lock); - - set_bit(FSCACHE_COOKIE_PENDING_FILL, &cookie->flags); + /* Allow write requests to begin stacking up and read requests to begin + * returning ENODATA. + */ set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); _debug("wake up lookup %p", &cookie->flags); - smp_mb__before_clear_bit(); - clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags); - smp_mb__after_clear_bit(); + clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags); wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP); - set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); - } else { - ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING); - spin_unlock(&object->lock); } - _leave(""); } EXPORT_SYMBOL(fscache_object_lookup_negative); @@ -568,38 +518,26 @@ void fscache_obtained_object(struct fscache_object *object) { struct fscache_cookie *cookie = object->cookie; - _enter("{OBJ%x,%s}", - object->debug_id, fscache_object_states[object->state]); + _enter("{OBJ%x,%s}", object->debug_id, object->state->name); /* if we were still looking up, then we must have a positive lookup * result, in which case there may be data available */ - spin_lock(&object->lock); - if (object->state == FSCACHE_OBJECT_LOOKING_UP) { + if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) { fscache_stat(&fscache_n_object_lookups_positive); - clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); + /* We do (presumably) have data */ + clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); - object->state = FSCACHE_OBJECT_AVAILABLE; - spin_unlock(&object->lock); - - smp_mb__before_clear_bit(); - clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags); - smp_mb__after_clear_bit(); + /* Allow write requests to begin stacking up and read requests + * to begin shovelling data. 
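Both fscache_object_lookup_negative() and fscache_obtained_object() now use clear_bit_unlock() in place of the old smp_mb__before_clear_bit()/clear_bit()/smp_mb__after_clear_bit() sequence: the flag is cleared with release semantics, so a waiter that sees the bit clear also sees everything published before it. A userspace analogue using C11 atomics (the kernel sleeps in wait_on_bit() rather than spinning):

/* Publish a result, then clear a "busy" bit with release ordering so a
 * waiter that sees the bit clear also sees the result. */
#include <stdatomic.h>
#include <stdio.h>

#define LOOKING_UP (1UL << 0)

static atomic_ulong flags = LOOKING_UP;
static int lookup_result;

static void finish_lookup(void)
{
	lookup_result = 42;				/* publish first... */
	atomic_fetch_and_explicit(&flags, ~LOOKING_UP,
				  memory_order_release);/* ...then clear the bit */
	/* the kernel would call wake_up_bit() here */
}

static int wait_for_lookup(void)
{
	while (atomic_load_explicit(&flags, memory_order_acquire) & LOOKING_UP)
		;					/* kernel: wait_on_bit() */
	return lookup_result;
}

int main(void)
{
	finish_lookup();
	printf("%d\n", wait_for_lookup());
	return 0;
}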
+ */ + clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags); wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP); - set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); } else { - ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING); fscache_stat(&fscache_n_object_created); - - object->state = FSCACHE_OBJECT_AVAILABLE; - spin_unlock(&object->lock); - set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); - smp_wmb(); } - if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) - wake_up_bit(&cookie->flags, FSCACHE_COOKIE_CREATING); - + set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags); _leave(""); } EXPORT_SYMBOL(fscache_obtained_object); @@ -607,15 +545,14 @@ EXPORT_SYMBOL(fscache_obtained_object); /* * handle an object that has just become available */ -static void fscache_object_available(struct fscache_object *object) +static const struct fscache_state *fscache_object_available(struct fscache_object *object, + int event) { - _enter("{OBJ%x}", object->debug_id); + _enter("{OBJ%x},%d", object->debug_id, event); - spin_lock(&object->lock); + object->oob_table = fscache_osm_run_oob; - if (object->cookie && - test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags)) - wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING); + spin_lock(&object->lock); fscache_done_parent_op(object); if (object->n_in_progress == 0) { @@ -631,130 +568,158 @@ static void fscache_object_available(struct fscache_object *object) fscache_stat(&fscache_n_cop_lookup_complete); object->cache->ops->lookup_complete(object); fscache_stat_d(&fscache_n_cop_lookup_complete); - fscache_enqueue_dependents(object); fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif); fscache_stat(&fscache_n_object_avail); _leave(""); + return transit_to(JUMPSTART_DEPS); } /* - * drop an object's attachments + * Wake up this object's dependent objects now that we've become available. */ -static void fscache_drop_object(struct fscache_object *object) +static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object, + int event) { - struct fscache_object *parent = object->parent; - struct fscache_cache *cache = object->cache; + _enter("{OBJ%x},%d", object->debug_id, event); - _enter("{OBJ%x,%d}", object->debug_id, object->n_children); + if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY)) + return NO_TRANSIT; /* Not finished; requeue */ + return transit_to(WAIT_FOR_CMD); +} - ASSERTCMP(object->cookie, ==, NULL); - ASSERT(hlist_unhashed(&object->cookie_link)); +/* + * Handle lookup or creation failure.
+ */ +static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object, + int event) +{ + struct fscache_cookie *cookie; - spin_lock(&cache->object_list_lock); - list_del_init(&object->cache_link); - spin_unlock(&cache->object_list_lock); + _enter("{OBJ%x},%d", object->debug_id, event); - fscache_stat(&fscache_n_cop_drop_object); - cache->ops->drop_object(object); - fscache_stat_d(&fscache_n_cop_drop_object); + object->oob_event_mask = 0; - if (parent) { - _debug("release parent OBJ%x {%d}", - parent->debug_id, parent->n_children); + fscache_stat(&fscache_n_cop_lookup_complete); + object->cache->ops->lookup_complete(object); + fscache_stat_d(&fscache_n_cop_lookup_complete); - spin_lock(&parent->lock); - parent->n_children--; - if (parent->n_children == 0) - fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED); - spin_unlock(&parent->lock); - object->parent = NULL; + cookie = object->cookie; + set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags); + if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) + wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP); + + fscache_done_parent_op(object); + return transit_to(KILL_OBJECT); +} + +/* + * Wait for completion of all active operations on this object and the death of + * all child objects of this object. + */ +static const struct fscache_state *fscache_kill_object(struct fscache_object *object, + int event) +{ + _enter("{OBJ%x,%d,%d},%d", + object->debug_id, object->n_ops, object->n_children, event); + + clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); + object->oob_event_mask = 0; + + if (list_empty(&object->dependents) && + object->n_ops == 0 && + object->n_children == 0) + return transit_to(DROP_OBJECT); + + if (object->n_in_progress == 0) { + spin_lock(&object->lock); + if (object->n_ops > 0 && object->n_in_progress == 0) + fscache_start_operations(object); + spin_unlock(&object->lock); } - /* this just shifts the object release to the work processor */ - fscache_put_object(object); + if (!list_empty(&object->dependents)) + return transit_to(KILL_DEPENDENTS); - _leave(""); + return transit_to(WAIT_FOR_CLEARANCE); } /* - * release or recycle an object that the netfs has discarded + * Kill dependent objects. 
*/ -static void fscache_release_object(struct fscache_object *object) +static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object, + int event) { - _enter(""); + _enter("{OBJ%x},%d", object->debug_id, event); - fscache_drop_object(object); + if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL)) + return NO_TRANSIT; /* Not finished */ + return transit_to(WAIT_FOR_CLEARANCE); } /* - * withdraw an object from active service + * Drop an object's attachments */ -static void fscache_withdraw_object(struct fscache_object *object) +static const struct fscache_state *fscache_drop_object(struct fscache_object *object, + int event) { - struct fscache_cookie *cookie; - bool detached; + struct fscache_object *parent = object->parent; + struct fscache_cookie *cookie = object->cookie; + struct fscache_cache *cache = object->cache; + bool awaken = false; - _enter(""); + _enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event); - spin_lock(&object->lock); - cookie = object->cookie; - if (cookie) { - /* need to get the cookie lock before the object lock, starting - * from the object pointer */ - atomic_inc(&cookie->usage); - spin_unlock(&object->lock); + ASSERT(cookie != NULL); + ASSERT(!hlist_unhashed(&object->cookie_link)); - detached = false; - spin_lock(&cookie->lock); - spin_lock(&object->lock); + /* Make sure the cookie no longer points here and that the netfs isn't + * waiting for us. + */ + spin_lock(&cookie->lock); + hlist_del_init(&object->cookie_link); + if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) + awaken = true; + spin_unlock(&cookie->lock); - if (object->cookie == cookie) { - hlist_del_init(&object->cookie_link); - object->cookie = NULL; - fscache_invalidation_complete(cookie); - detached = true; - } - spin_unlock(&cookie->lock); - fscache_cookie_put(cookie); - if (detached) - fscache_cookie_put(cookie); - } + if (awaken) + wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING); + /* Prevent a race with our last child, which has to signal EV_CLEARED + * before dropping our spinlock. 
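The empty lock/unlock pair that follows is a standard kernel idiom: taking and immediately releasing object->lock cannot complete while the last child is still inside the locked region raising EV_CLEARED, so the object cannot be torn down under it. The same handshake in userspace terms:

/* An empty critical section as a synchronisation barrier: returns only
 * once any thread that held the lock when we blocked has finished. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void wait_out_critical_sections(void)
{
	pthread_mutex_lock(&lock);
	pthread_mutex_unlock(&lock);
	/* no earlier holder of the lock can still be inside its section */
}

int main(void)
{
	wait_out_critical_sections();
	printf("safe to tear down\n");
	return 0;
}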
+ */ + spin_lock(&object->lock); spin_unlock(&object->lock); - fscache_drop_object(object); -} + /* Discard from the cache's collection of objects */ + spin_lock(&cache->object_list_lock); + list_del_init(&object->cache_link); + spin_unlock(&cache->object_list_lock); -/* - * withdraw an object from active service at the behest of the cache - * - need break the links to a cached object cookie - * - called under two situations: - * (1) recycler decides to reclaim an in-use object - * (2) a cache is unmounted - * - have to take care as the cookie can be being relinquished by the netfs - * simultaneously - * - the object is pinned by the caller holding a refcount on it - */ -void fscache_withdrawing_object(struct fscache_cache *cache, - struct fscache_object *object) -{ - bool enqueue = false; + fscache_stat(&fscache_n_cop_drop_object); + cache->ops->drop_object(object); + fscache_stat_d(&fscache_n_cop_drop_object); - _enter(",OBJ%x", object->debug_id); + /* The parent object wants to know when all its dependents have gone */ + if (parent) { + _debug("release parent OBJ%x {%d}", + parent->debug_id, parent->n_children); - spin_lock(&object->lock); - if (object->state < FSCACHE_OBJECT_WITHDRAWING) { - object->state = FSCACHE_OBJECT_WITHDRAWING; - enqueue = true; + spin_lock(&parent->lock); + parent->n_children--; + if (parent->n_children == 0) + fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED); + spin_unlock(&parent->lock); + object->parent = NULL; } - spin_unlock(&object->lock); - if (enqueue) - fscache_enqueue_object(object); + /* this just shifts the object release to the work processor */ + fscache_put_object(object); + fscache_stat(&fscache_n_object_dead); _leave(""); + return transit_to(OBJECT_DEAD); } /* @@ -771,7 +736,7 @@ static int fscache_get_object(struct fscache_object *object) } /* - * discard a ref on a work item + * Discard a ref on an object */ static void fscache_put_object(struct fscache_object *object) { @@ -780,6 +745,22 @@ static void fscache_put_object(struct fscache_object *object) fscache_stat_d(&fscache_n_cop_put_object); } +/** + * fscache_object_destroy - Note that a cache object is about to be destroyed + * @object: The object to be destroyed + * + * Note the imminent destruction and deallocation of a cache object record. + */ +void fscache_object_destroy(struct fscache_object *object) +{ + fscache_objlist_remove(object); + + /* We can get rid of the cookie now */ + fscache_cookie_put(object->cookie); + object->cookie = NULL; +} +EXPORT_SYMBOL(fscache_object_destroy); + /* * enqueue an object for metadata-type processing */ @@ -803,7 +784,7 @@ void fscache_enqueue_object(struct fscache_object *object) /** * fscache_object_sleep_till_congested - Sleep until object wq is congested - * @timoutp: Scheduler sleep timeout + * @timeoutp: Scheduler sleep timeout * * Allow an object handler to sleep until the object workqueue is congested. * @@ -831,18 +812,21 @@ EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested); /* - * enqueue the dependents of an object for metadata-type processing - * - the caller must hold the object's lock - * - this may cause an already locked object to wind up being processed again + * Enqueue the dependents of an object for metadata-type processing. + * + * If we don't manage to finish the list before the scheduler wants to run + * again then return false immediately. We return true if the list was + * cleared.
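fscache_enqueue_dependents() below bails out when need_resched() is true and reports whether the list was emptied; its callers return NO_TRANSIT to requeue themselves. A userspace analogue, with a fixed work budget standing in for need_resched():

/* Drain a queue in bounded batches; report false if work remains so the
 * caller can requeue itself instead of hogging the CPU. */
#include <stdbool.h>
#include <stdio.h>

static bool drain(int *queue, int *len, int budget)
{
	while (*len > 0) {
		printf("dependent %d enqueued\n", queue[--(*len)]);
		if (*len > 0 && --budget == 0)
			return false;	/* not finished: requeue */
	}
	return true;			/* list cleared */
}

int main(void)
{
	int q[] = { 1, 2, 3, 4, 5 };
	int len = 5;

	while (!drain(q, &len, 2))
		;			/* kernel: return NO_TRANSIT */
	return 0;
}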
*/ -static void fscache_enqueue_dependents(struct fscache_object *object) +static bool fscache_enqueue_dependents(struct fscache_object *object, int event) { struct fscache_object *dep; + bool ret = true; _enter("{OBJ%x}", object->debug_id); if (list_empty(&object->dependents)) - return; + return true; spin_lock(&object->lock); @@ -851,23 +835,23 @@ static void fscache_enqueue_dependents(struct fscache_object *object) struct fscache_object, dep_link); list_del_init(&dep->dep_link); - - /* sort onto appropriate lists */ - fscache_enqueue_object(dep); + fscache_raise_event(dep, event); fscache_put_object(dep); - if (!list_empty(&object->dependents)) - cond_resched_lock(&object->lock); + if (!list_empty(&object->dependents) && need_resched()) { + ret = false; + break; + } } spin_unlock(&object->lock); + return ret; } /* * remove an object from whatever queue it's waiting on - * - the caller must hold object->lock */ -void fscache_dequeue_object(struct fscache_object *object) +static void fscache_dequeue_object(struct fscache_object *object) { _enter("{OBJ%x}", object->debug_id); @@ -886,7 +870,10 @@ void fscache_dequeue_object(struct fscache_object *object) * @data: The auxiliary data for the object * @datalen: The size of the auxiliary data * - * This function consults the netfs about the coherency state of an object + * This function consults the netfs about the coherency state of an object. + * The caller must be holding a ref on cookie->n_active (held by + * fscache_look_up_object() on behalf of the cache backend during object lookup + * and creation). */ enum fscache_checkaux fscache_check_aux(struct fscache_object *object, const void *data, uint16_t datalen) @@ -927,12 +914,23 @@ EXPORT_SYMBOL(fscache_check_aux); /* * Asynchronously invalidate an object. */ -static void fscache_invalidate_object(struct fscache_object *object) +static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object, + int event) { struct fscache_operation *op; struct fscache_cookie *cookie = object->cookie; - _enter("{OBJ%x}", object->debug_id); + _enter("{OBJ%x},%d", object->debug_id, event); + + /* We're going to need the cookie. If the cookie is not available then + * retire the object instead. + */ + if (!fscache_use_cookie(object)) { + ASSERT(object->cookie->stores.rnode == NULL); + set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags); + _leave(" [no cookie]"); + return transit_to(KILL_OBJECT); + } /* Reject any new read/write ops and abort any that are pending. */ fscache_invalidate_writes(cookie); @@ -941,14 +939,13 @@ static void fscache_invalidate_object(struct fscache_object *object) /* Now we have to wait for in-progress reads and writes */ op = kzalloc(sizeof(*op), GFP_KERNEL); - if (!op) { - fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR); - _leave(" [ENOMEM]"); - return; - } + if (!op) + goto nomem; fscache_operation_init(op, object->cache->ops->invalidate_object, NULL); - op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE); + op->flags = FSCACHE_OP_ASYNC | + (1 << FSCACHE_OP_EXCLUSIVE) | + (1 << FSCACHE_OP_UNUSE_COOKIE); spin_lock(&cookie->lock); if (fscache_submit_exclusive_op(object, op) < 0) @@ -965,13 +962,50 @@ static void fscache_invalidate_object(struct fscache_object *object) /* We can allow read and write requests to come in once again. They'll * queue up behind our exclusive invalidation operation. 
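Because the invalidation op is submitted with FSCACHE_OP_EXCLUSIVE set, reads and writes arriving afterwards pend behind it. fscache enforces this with the n_ops/n_exclusive counters and the pending_ops list rather than a lock; a reader/writer lock gives the same ordering picture in userspace (an analogy only):

/* Ops submitted while an exclusive op runs simply queue behind it. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t obj_lock = PTHREAD_RWLOCK_INITIALIZER;

static void *read_op(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&obj_lock);	/* pends behind invalidation */
	printf("read op runs\n");
	pthread_rwlock_unlock(&obj_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_rwlock_wrlock(&obj_lock);	/* exclusive invalidation begins */
	pthread_create(&t, NULL, read_op, NULL);
	printf("invalidating\n");
	pthread_rwlock_unlock(&obj_lock);	/* invalidation done */
	pthread_join(t, NULL);
	return 0;
}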
*/ - fscache_invalidation_complete(cookie); - _leave(""); - return; + if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) + wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING); + _leave(" [ok]"); + return transit_to(UPDATE_OBJECT); + +nomem: + clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); + fscache_unuse_cookie(object); + _leave(" [ENOMEM]"); + return transit_to(KILL_OBJECT); submit_op_failed: + clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); spin_unlock(&cookie->lock); kfree(op); - fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR); _leave(" [EIO]"); + return transit_to(KILL_OBJECT); +} + +static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object, + int event) +{ + const struct fscache_state *s; + + fscache_stat(&fscache_n_invalidates_run); + fscache_stat(&fscache_n_cop_invalidate_object); + s = _fscache_invalidate_object(object, event); + fscache_stat_d(&fscache_n_cop_invalidate_object); + return s; +} + +/* + * Asynchronously update an object. + */ +static const struct fscache_state *fscache_update_object(struct fscache_object *object, + int event) +{ + _enter("{OBJ%x},%d", object->debug_id, event); + + fscache_stat(&fscache_n_updates_run); + fscache_stat(&fscache_n_cop_update_object); + object->cache->ops->update_object(object); + fscache_stat_d(&fscache_n_cop_update_object); + + _leave(""); + return transit_to(WAIT_FOR_CMD); } diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index 762a9ec4ffa4..318071aca217 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c @@ -35,7 +35,7 @@ void fscache_enqueue_operation(struct fscache_operation *op) ASSERT(list_empty(&op->pend_link)); ASSERT(op->processor != NULL); - ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE); + ASSERT(fscache_object_is_available(op->object)); ASSERTCMP(atomic_read(&op->usage), >, 0); ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS); @@ -119,7 +119,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object, /* need to issue a new write op after this */ clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); ret = 0; - } else if (object->state == FSCACHE_OBJECT_CREATING) { + } else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) { op->object = object; object->n_ops++; object->n_exclusive++; /* reads and writes must wait */ @@ -144,7 +144,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object, */ static void fscache_report_unexpected_submission(struct fscache_object *object, struct fscache_operation *op, - unsigned long ostate) + const struct fscache_state *ostate) { static bool once_only; struct fscache_operation *p; @@ -155,11 +155,8 @@ static void fscache_report_unexpected_submission(struct fscache_object *object, once_only = true; kdebug("unexpected submission OP%x [OBJ%x %s]", - op->debug_id, object->debug_id, - fscache_object_states[object->state]); - kdebug("objstate=%s [%s]", - fscache_object_states[object->state], - fscache_object_states[ostate]); + op->debug_id, object->debug_id, object->state->name); + kdebug("objstate=%s [%s]", object->state->name, ostate->name); kdebug("objflags=%lx", object->flags); kdebug("objevent=%lx [%lx]", object->events, object->event_mask); kdebug("ops=%u inp=%u exc=%u", @@ -190,7 +187,7 @@ static void fscache_report_unexpected_submission(struct fscache_object *object, int fscache_submit_op(struct fscache_object *object, struct fscache_operation *op) { - unsigned long ostate; + const struct fscache_state *ostate; int ret; _enter("{OBJ%x OP%x},{%u}", @@ 
-226,16 +223,14 @@ int fscache_submit_op(struct fscache_object *object, fscache_run_op(object, op); } ret = 0; - } else if (object->state == FSCACHE_OBJECT_CREATING) { + } else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) { op->object = object; object->n_ops++; atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); fscache_stat(&fscache_n_op_pend); ret = 0; - } else if (object->state == FSCACHE_OBJECT_DYING || - object->state == FSCACHE_OBJECT_LC_DYING || - object->state == FSCACHE_OBJECT_WITHDRAWING) { + } else if (fscache_object_is_dying(object)) { fscache_stat(&fscache_n_op_rejected); op->state = FSCACHE_OP_ST_CANCELLED; ret = -ENOBUFS; @@ -265,8 +260,8 @@ void fscache_abort_object(struct fscache_object *object) } /* - * jump start the operation processing on an object - * - caller must hold object->lock + * Jump start the operation processing on an object. The caller must hold + * object->lock. */ void fscache_start_operations(struct fscache_object *object) { @@ -428,14 +423,10 @@ void fscache_put_operation(struct fscache_operation *op) object = op->object; - if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) { - if (atomic_dec_and_test(&object->n_reads)) { - clear_bit(FSCACHE_COOKIE_WAITING_ON_READS, - &object->cookie->flags); - wake_up_bit(&object->cookie->flags, - FSCACHE_COOKIE_WAITING_ON_READS); - } - } + if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) + atomic_dec(&object->n_reads); + if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags)) + fscache_unuse_cookie(object); /* now... we may get called with the object spinlock held, so we * complete the cleanup here only if we can immediately acquire the diff --git a/fs/fscache/page.c b/fs/fscache/page.c index ff000e52072d..d479ab3c63e4 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -109,7 +109,7 @@ page_busy: * allocator as the work threads writing to the cache may all end up * sleeping on memory allocation, so we may need to impose a timeout * too. 
*/ - if (!(gfp & __GFP_WAIT)) { + if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) { fscache_stat(&fscache_n_store_vmscan_busy); return false; } @@ -163,10 +163,12 @@ static void fscache_attr_changed_op(struct fscache_operation *op) fscache_stat(&fscache_n_attr_changed_calls); - if (fscache_object_is_active(object)) { + if (fscache_object_is_active(object) && + fscache_use_cookie(object)) { fscache_stat(&fscache_n_cop_attr_changed); ret = object->cache->ops->attr_changed(object); fscache_stat_d(&fscache_n_cop_attr_changed); + fscache_unuse_cookie(object); if (ret < 0) fscache_abort_object(object); } @@ -233,7 +235,7 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op) _enter("{OP%x}", op->op.debug_id); - ASSERTCMP(op->n_pages, ==, 0); + ASSERTCMP(atomic_read(&op->n_pages), ==, 0); fscache_hist(fscache_retrieval_histogram, op->start_time); if (op->context) @@ -246,6 +248,7 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op) * allocate a retrieval op */ static struct fscache_retrieval *fscache_alloc_retrieval( + struct fscache_cookie *cookie, struct address_space *mapping, fscache_rw_complete_t end_io_func, void *context) @@ -260,7 +263,10 @@ static struct fscache_retrieval *fscache_alloc_retrieval( } fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op); - op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING); + atomic_inc(&cookie->n_active); + op->op.flags = FSCACHE_OP_MYTHREAD | + (1UL << FSCACHE_OP_WAITING) | + (1UL << FSCACHE_OP_UNUSE_COOKIE); op->mapping = mapping; op->end_io_func = end_io_func; op->context = context; @@ -310,7 +316,7 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op) struct fscache_retrieval *op = container_of(_op, struct fscache_retrieval, op); - op->n_pages = 0; + atomic_set(&op->n_pages, 0); } /* @@ -394,12 +400,13 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, if (fscache_wait_for_deferred_lookup(cookie) < 0) return -ERESTARTSYS; - op = fscache_alloc_retrieval(page->mapping, end_io_func, context); + op = fscache_alloc_retrieval(cookie, page->mapping, + end_io_func, context); if (!op) { _leave(" = -ENOMEM"); return -ENOMEM; } - op->n_pages = 1; + atomic_set(&op->n_pages, 1); spin_lock(&cookie->lock); @@ -408,7 +415,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, object = hlist_entry(cookie->backing_objects.first, struct fscache_object, cookie_link); - ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP); + ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)); atomic_inc(&object->n_reads); __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); @@ -465,6 +472,7 @@ nobufs_unlock_dec: atomic_dec(&object->n_reads); nobufs_unlock: spin_unlock(&cookie->lock); + atomic_dec(&cookie->n_active); kfree(op); nobufs: fscache_stat(&fscache_n_retrievals_nobufs); @@ -522,10 +530,10 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, if (fscache_wait_for_deferred_lookup(cookie) < 0) return -ERESTARTSYS; - op = fscache_alloc_retrieval(mapping, end_io_func, context); + op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context); if (!op) return -ENOMEM; - op->n_pages = *nr_pages; + atomic_set(&op->n_pages, *nr_pages); spin_lock(&cookie->lock); @@ -589,6 +597,7 @@ nobufs_unlock_dec: atomic_dec(&object->n_reads); nobufs_unlock: spin_unlock(&cookie->lock); + atomic_dec(&cookie->n_active); kfree(op); nobufs: fscache_stat(&fscache_n_retrievals_nobufs); @@ -631,10 +640,10 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, if
(fscache_wait_for_deferred_lookup(cookie) < 0) return -ERESTARTSYS; - op = fscache_alloc_retrieval(page->mapping, NULL, NULL); + op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL); if (!op) return -ENOMEM; - op->n_pages = 1; + atomic_set(&op->n_pages, 1); spin_lock(&cookie->lock); @@ -675,6 +684,7 @@ error: nobufs_unlock: spin_unlock(&cookie->lock); + atomic_dec(&cookie->n_active); kfree(op); nobufs: fscache_stat(&fscache_n_allocs_nobufs); @@ -729,8 +739,9 @@ static void fscache_write_op(struct fscache_operation *_op) */ spin_unlock(&object->lock); fscache_op_complete(&op->op, false); - _leave(" [cancel] op{f=%lx s=%u} obj{s=%u f=%lx}", - _op->flags, _op->state, object->state, object->flags); + _leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}", + _op->flags, _op->state, object->state->short_name, + object->flags); return; } @@ -796,11 +807,16 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie) _enter(""); - while (spin_lock(&cookie->stores_lock), - n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, - ARRAY_SIZE(results), - FSCACHE_COOKIE_PENDING_TAG), - n > 0) { + for (;;) { + spin_lock(&cookie->stores_lock); + n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, + ARRAY_SIZE(results), + FSCACHE_COOKIE_PENDING_TAG); + if (n == 0) { + spin_unlock(&cookie->stores_lock); + break; + } + for (i = n - 1; i >= 0; i--) { page = results[i]; radix_tree_delete(&cookie->stores, page->index); @@ -812,7 +828,6 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie) page_cache_release(results[i]); } - spin_unlock(&cookie->stores_lock); _leave(""); } @@ -829,14 +844,12 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie) * (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is * set) * - * (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred - * fill op) + * (a) no writes yet * * (b) writes deferred till post-creation (mark page for writing and * return immediately) * * (2) negative lookup, object created, initial fill being made from netfs - * (FSCACHE_COOKIE_INITIAL_FILL is set) * * (a) fill point not yet reached this page (mark page for writing and * return) @@ -873,7 +886,9 @@ int __fscache_write_page(struct fscache_cookie *cookie, fscache_operation_init(&op->op, fscache_write_op, fscache_release_write_op); - op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING); + op->op.flags = FSCACHE_OP_ASYNC | + (1 << FSCACHE_OP_WAITING) | + (1 << FSCACHE_OP_UNUSE_COOKIE); ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM); if (ret < 0) @@ -919,6 +934,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, op->op.debug_id = atomic_inc_return(&fscache_op_debug_id); op->store_limit = object->store_limit; + atomic_inc(&cookie->n_active); if (fscache_submit_op(object, &op->op) < 0) goto submit_failed; @@ -945,6 +961,7 @@ already_pending: return 0; submit_failed: + atomic_dec(&cookie->n_active); spin_lock(&cookie->stores_lock); radix_tree_delete(&cookie->stores, page->index); spin_unlock(&cookie->stores_lock); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index f3f783dc4f75..0eda52738ec4 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -14,7 +14,7 @@ #include <linux/namei.h> #include <linux/slab.h> -static bool fuse_use_readdirplus(struct inode *dir, struct file *filp) +static bool fuse_use_readdirplus(struct inode *dir, struct dir_context *ctx) { struct fuse_conn *fc = get_fuse_conn(dir); struct fuse_inode *fi = get_fuse_inode(dir); @@ -25,7 +25,7 @@ static bool fuse_use_readdirplus(struct inode *dir, struct file 
*filp) return true; if (test_and_clear_bit(FUSE_I_ADVISE_RDPLUS, &fi->state)) return true; - if (filp->f_pos == 0) + if (ctx->pos == 0) return true; return false; } @@ -1165,25 +1165,23 @@ static int fuse_permission(struct inode *inode, int mask) } static int parse_dirfile(char *buf, size_t nbytes, struct file *file, - void *dstbuf, filldir_t filldir) + struct dir_context *ctx) { while (nbytes >= FUSE_NAME_OFFSET) { struct fuse_dirent *dirent = (struct fuse_dirent *) buf; size_t reclen = FUSE_DIRENT_SIZE(dirent); - int over; if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX) return -EIO; if (reclen > nbytes) break; - over = filldir(dstbuf, dirent->name, dirent->namelen, - file->f_pos, dirent->ino, dirent->type); - if (over) + if (!dir_emit(ctx, dirent->name, dirent->namelen, + dirent->ino, dirent->type)) break; buf += reclen; nbytes -= reclen; - file->f_pos = dirent->off; + ctx->pos = dirent->off; } return 0; @@ -1284,7 +1282,7 @@ out: } static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file, - void *dstbuf, filldir_t filldir, u64 attr_version) + struct dir_context *ctx, u64 attr_version) { struct fuse_direntplus *direntplus; struct fuse_dirent *dirent; @@ -1309,10 +1307,9 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file, we need to send a FORGET for each of those which we did not link. */ - over = filldir(dstbuf, dirent->name, dirent->namelen, - file->f_pos, dirent->ino, - dirent->type); - file->f_pos = dirent->off; + over = !dir_emit(ctx, dirent->name, dirent->namelen, + dirent->ino, dirent->type); + ctx->pos = dirent->off; } buf += reclen; @@ -1326,7 +1323,7 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file, return 0; } -static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir) +static int fuse_readdir(struct file *file, struct dir_context *ctx) { int plus, err; size_t nbytes; @@ -1349,17 +1346,17 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir) return -ENOMEM; } - plus = fuse_use_readdirplus(inode, file); + plus = fuse_use_readdirplus(inode, ctx); req->out.argpages = 1; req->num_pages = 1; req->pages[0] = page; req->page_descs[0].length = PAGE_SIZE; if (plus) { attr_version = fuse_get_attr_version(fc); - fuse_read_fill(req, file, file->f_pos, PAGE_SIZE, + fuse_read_fill(req, file, ctx->pos, PAGE_SIZE, FUSE_READDIRPLUS); } else { - fuse_read_fill(req, file, file->f_pos, PAGE_SIZE, + fuse_read_fill(req, file, ctx->pos, PAGE_SIZE, FUSE_READDIR); } fuse_request_send(fc, req); @@ -1369,11 +1366,11 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir) if (!err) { if (plus) { err = parse_dirplusfile(page_address(page), nbytes, - file, dstbuf, filldir, + file, ctx, attr_version); } else { err = parse_dirfile(page_address(page), nbytes, file, - dstbuf, filldir); + ctx); } } @@ -1886,7 +1883,7 @@ static const struct inode_operations fuse_dir_inode_operations = { static const struct file_operations fuse_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = fuse_readdir, + .iterate = fuse_readdir, .open = fuse_dir_open, .release = fuse_dir_release, .fsync = fuse_dir_fsync, diff --git a/fs/fuse/file.c b/fs/fuse/file.c index e570081f9f76..5c121fe19c5f 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -548,8 +548,7 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos) res = io->bytes < 0 ? 
io->size : io->bytes; if (!is_sync_kiocb(io->iocb)) { - struct path *path = &io->iocb->ki_filp->f_path; - struct inode *inode = path->dentry->d_inode; + struct inode *inode = file_inode(io->iocb->ki_filp); struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); @@ -2470,13 +2469,16 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, .mode = mode }; int err; + bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) || + (mode & FALLOC_FL_PUNCH_HOLE); if (fc->no_fallocate) return -EOPNOTSUPP; - if (mode & FALLOC_FL_PUNCH_HOLE) { + if (lock_inode) { mutex_lock(&inode->i_mutex); - fuse_set_nowrite(inode); + if (mode & FALLOC_FL_PUNCH_HOLE) + fuse_set_nowrite(inode); } req = fuse_get_req_nopages(fc); @@ -2511,8 +2513,9 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, fuse_invalidate_attr(inode); out: - if (mode & FALLOC_FL_PUNCH_HOLE) { - fuse_release_nowrite(inode); + if (lock_inode) { + if (mode & FALLOC_FL_PUNCH_HOLE) + fuse_release_nowrite(inode); mutex_unlock(&inode->i_mutex); } diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 9a0cdde14a08..0b578598c6ac 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -785,7 +785,7 @@ static const struct super_operations fuse_super_operations = { static void sanitize_global_limit(unsigned *limit) { if (*limit == 0) - *limit = ((num_physpages << PAGE_SHIFT) >> 13) / + *limit = ((totalram_pages << PAGE_SHIFT) >> 13) / sizeof(struct fuse_req); if (*limit >= 1 << 16) diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig index 5a376ab81feb..90c6a8faaecb 100644 --- a/fs/gfs2/Kconfig +++ b/fs/gfs2/Kconfig @@ -20,13 +20,12 @@ config GFS2_FS be found here: http://sources.redhat.com/cluster The "nolock" lock module is now built in to GFS2 by default. If - you want to use the DLM, be sure to enable HOTPLUG and IPv4/6 - networking. + you want to use the DLM, be sure to enable IPv4/6 networking. config GFS2_FS_LOCKING_DLM bool "GFS2 DLM locking" depends on (GFS2_FS!=n) && NET && INET && (IPV6 || IPV6=n) && \ - HOTPLUG && CONFIGFS_FS && SYSFS && (DLM=y || DLM=GFS2_FS) + CONFIGFS_FS && SYSFS && (DLM=y || DLM=GFS2_FS) help Multiple node locking module for GFS2 diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 0bad69ed6336..ee48ad37d9c0 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -110,7 +110,7 @@ static int gfs2_writepage_common(struct page *page, /* Is the page fully outside i_size? (truncate in progress) */ offset = i_size & (PAGE_CACHE_SIZE-1); if (page->index > end_index || (page->index == end_index && !offset)) { - page->mapping->a_ops->invalidatepage(page, 0); + page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); goto out; } return 1; @@ -299,7 +299,8 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping, /* Is the page fully outside i_size? 
(truncate in progress) */ if (page->index > end_index || (page->index == end_index && !offset)) { - page->mapping->a_ops->invalidatepage(page, 0); + page->mapping->a_ops->invalidatepage(page, 0, + PAGE_CACHE_SIZE); unlock_page(page); continue; } @@ -943,27 +944,33 @@ static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh) unlock_buffer(bh); } -static void gfs2_invalidatepage(struct page *page, unsigned long offset) +static void gfs2_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); + unsigned int stop = offset + length; + int partial_page = (offset || length < PAGE_CACHE_SIZE); struct buffer_head *bh, *head; unsigned long pos = 0; BUG_ON(!PageLocked(page)); - if (offset == 0) + if (!partial_page) ClearPageChecked(page); if (!page_has_buffers(page)) goto out; bh = head = page_buffers(page); do { + if (pos + bh->b_size > stop) + return; + if (offset <= pos) gfs2_discard(sdp, bh); pos += bh->b_size; bh = bh->b_this_page; } while (bh != head); out: - if (offset == 0) + if (!partial_page) try_to_release_page(page, 0); } diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 93b5809c20bb..5e2f56fccf6b 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1232,7 +1232,9 @@ static int do_grow(struct inode *inode, u64 size) unstuff = 1; } - error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0); + error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT + + (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ? + 0 : RES_QUOTA), 0); if (error) goto do_grow_release; diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c index 4fddb3c22d25..f2448ab2aac5 100644 --- a/fs/gfs2/dentry.c +++ b/fs/gfs2/dentry.c @@ -109,8 +109,7 @@ fail: return 0; } -static int gfs2_dhash(const struct dentry *dentry, const struct inode *inode, - struct qstr *str) +static int gfs2_dhash(const struct dentry *dentry, struct qstr *str) { str->hash = gfs2_disk_hash(str->name, str->len); return 0; } diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index b631c9043460..0cb4c1557f20 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c @@ -1125,13 +1125,14 @@ static int dir_double_exhash(struct gfs2_inode *dip) if (IS_ERR(hc)) return PTR_ERR(hc); - h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS | __GFP_NOWARN); + hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS | __GFP_NOWARN); if (hc2 == NULL) hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL); if (!hc2) return -ENOMEM; + h = hc2; error = gfs2_meta_inode_buffer(dip, &dibh); if (error) goto out_kfree; @@ -1212,9 +1213,7 @@ static int compare_dents(const void *a, const void *b) /** * do_filldir_main - read out directory entries * @dip: The GFS2 inode - * @offset: The offset in the file to read from - * @opaque: opaque data to pass to filldir - * @filldir: The function to pass entries to + * @ctx: what to feed the entries to * @darr: an array of struct gfs2_dirent pointers to read * @entries: the number of entries in darr * @copied: pointer to int that's non-zero if an entry has been copied out @@ -1224,11 +1223,10 @@ static int compare_dents(const void *a, const void *b) * the possibility that they will fall into different readdir buffers or * that someone will want to seek to that location. 
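
The ->invalidatepage conversions above all follow one convention: the VFS now passes a byte range (offset, length) rather than a bare offset, so a filesystem can tell a partial invalidation (a hole punch) from whole-page truncation, and must leave private state alone for buffers outside the range. A minimal sketch of the convention, assuming the PAGE_CACHE_SIZE-era API; example_invalidatepage is a hypothetical name, not code from the patch:

    static void example_invalidatepage(struct page *page, unsigned int offset,
                                       unsigned int length)
    {
            unsigned int stop = offset + length;    /* end of invalidated range */
            /* whole-page invalidation only when the range covers it all */
            int partial_page = (offset || length < PAGE_CACHE_SIZE);

            if (!partial_page)
                    ClearPageChecked(page);         /* drop per-page private state */

            /* only buffers inside [offset, stop) may be discarded; a buffer
             * extending past stop must be preserved, hence the early return
             * in the gfs2 version above */

            if (!partial_page)
                    try_to_release_page(page, 0);
    }
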
* - * Returns: errno, >0 on exception from filldir + * Returns: errno, >0 if the actor tells you to stop */ -static int do_filldir_main(struct gfs2_inode *dip, u64 *offset, - void *opaque, filldir_t filldir, +static int do_filldir_main(struct gfs2_inode *dip, struct dir_context *ctx, const struct gfs2_dirent **darr, u32 entries, int *copied) { @@ -1236,7 +1234,6 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset, u64 off, off_next; unsigned int x, y; int run = 0; - int error = 0; sort(darr, entries, sizeof(struct gfs2_dirent *), compare_dents, NULL); @@ -1253,9 +1250,9 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset, off_next = be32_to_cpu(dent_next->de_hash); off_next = gfs2_disk_hash2offset(off_next); - if (off < *offset) + if (off < ctx->pos) continue; - *offset = off; + ctx->pos = off; if (off_next == off) { if (*copied && !run) @@ -1264,26 +1261,25 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset, } else run = 0; } else { - if (off < *offset) + if (off < ctx->pos) continue; - *offset = off; + ctx->pos = off; } - error = filldir(opaque, (const char *)(dent + 1), + if (!dir_emit(ctx, (const char *)(dent + 1), be16_to_cpu(dent->de_name_len), - off, be64_to_cpu(dent->de_inum.no_addr), - be16_to_cpu(dent->de_type)); - if (error) + be64_to_cpu(dent->de_inum.no_addr), + be16_to_cpu(dent->de_type))) return 1; *copied = 1; } - /* Increment the *offset by one, so the next time we come into the + /* Increment the ctx->pos by one, so the next time we come into the do_filldir fxn, we get the next entry instead of the last one in the current leaf */ - (*offset)++; + ctx->pos++; return 0; } @@ -1307,8 +1303,8 @@ static void gfs2_free_sort_buffer(void *ptr) kfree(ptr); } -static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque, - filldir_t filldir, int *copied, unsigned *depth, +static int gfs2_dir_read_leaf(struct inode *inode, struct dir_context *ctx, + int *copied, unsigned *depth, u64 leaf_no) { struct gfs2_inode *ip = GFS2_I(inode); @@ -1386,8 +1382,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque, } while(lfn); BUG_ON(entries2 != entries); - error = do_filldir_main(ip, offset, opaque, filldir, darr, - entries, copied); + error = do_filldir_main(ip, ctx, darr, entries, copied); out_free: for(i = 0; i < leaf; i++) brelse(larr[i]); @@ -1446,15 +1441,13 @@ static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index, /** * dir_e_read - Reads the entries from a directory into a filldir buffer * @dip: dinode pointer - * @offset: the hash of the last entry read shifted to the right once - * @opaque: buffer for the filldir function to fill - * @filldir: points to the filldir function to use + * @ctx: actor to feed the entries to * * Returns: errno */ -static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, - filldir_t filldir, struct file_ra_state *f_ra) +static int dir_e_read(struct inode *inode, struct dir_context *ctx, + struct file_ra_state *f_ra) { struct gfs2_inode *dip = GFS2_I(inode); u32 hsize, len = 0; @@ -1465,7 +1458,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, unsigned depth = 0; hsize = 1 << dip->i_depth; - hash = gfs2_dir_offset2hash(*offset); + hash = gfs2_dir_offset2hash(ctx->pos); index = hash >> (32 - dip->i_depth); if (dip->i_hash_cache == NULL) @@ -1477,7 +1470,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, gfs2_dir_readahead(inode, hsize, index, f_ra); while (index < hsize) { - error = 
gfs2_dir_read_leaf(inode, offset, opaque, filldir, + error = gfs2_dir_read_leaf(inode, ctx, &copied, &depth, be64_to_cpu(lp[index])); if (error) @@ -1492,8 +1485,8 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, return error; } -int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, - filldir_t filldir, struct file_ra_state *f_ra) +int gfs2_dir_read(struct inode *inode, struct dir_context *ctx, + struct file_ra_state *f_ra) { struct gfs2_inode *dip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); @@ -1507,7 +1500,7 @@ int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, return 0; if (dip->i_diskflags & GFS2_DIF_EXHASH) - return dir_e_read(inode, offset, opaque, filldir, f_ra); + return dir_e_read(inode, ctx, f_ra); if (!gfs2_is_stuffed(dip)) { gfs2_consist_inode(dip); @@ -1539,7 +1532,7 @@ int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, error = -EIO; goto out; } - error = do_filldir_main(dip, offset, opaque, filldir, darr, + error = do_filldir_main(dip, ctx, darr, dip->i_entries, &copied); out: kfree(darr); @@ -1555,9 +1548,9 @@ out: /** * gfs2_dir_search - Search a directory - * @dip: The GFS2 inode - * @filename: - * @inode: + * @dip: The GFS2 dir inode + * @name: The name we are looking up + * @fail_on_exist: Fail if the name exists rather than looking it up * * This routine searches a directory for a file or another directory. * Assumes a glock is held on dip. @@ -1565,22 +1558,25 @@ out: * Returns: errno */ -struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name) +struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name, + bool fail_on_exist) { struct buffer_head *bh; struct gfs2_dirent *dent; - struct inode *inode; + u64 addr, formal_ino; + u16 dtype; dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh); if (dent) { if (IS_ERR(dent)) return ERR_CAST(dent); - inode = gfs2_inode_lookup(dir->i_sb, - be16_to_cpu(dent->de_type), - be64_to_cpu(dent->de_inum.no_addr), - be64_to_cpu(dent->de_inum.no_formal_ino), 0); + dtype = be16_to_cpu(dent->de_type); + addr = be64_to_cpu(dent->de_inum.no_addr); + formal_ino = be64_to_cpu(dent->de_inum.no_formal_ino); brelse(bh); - return inode; + if (fail_on_exist) + return ERR_PTR(-EEXIST); + return gfs2_inode_lookup(dir->i_sb, dtype, addr, formal_ino, 0); } return ERR_PTR(-ENOENT); } diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h index 98c960beab35..4f03bbd1873f 100644 --- a/fs/gfs2/dir.h +++ b/fs/gfs2/dir.h @@ -18,14 +18,15 @@ struct gfs2_inode; struct gfs2_inum; extern struct inode *gfs2_dir_search(struct inode *dir, - const struct qstr *filename); + const struct qstr *filename, + bool fail_on_exist); extern int gfs2_dir_check(struct inode *dir, const struct qstr *filename, const struct gfs2_inode *ip); extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename, const struct gfs2_inode *ip); extern int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry); -extern int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, - filldir_t filldir, struct file_ra_state *f_ra); +extern int gfs2_dir_read(struct inode *inode, struct dir_context *ctx, + struct file_ra_state *f_ra); extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, const struct gfs2_inode *nip, unsigned int new_type); diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c index 9973df4ff565..8b9b3775e2e7 100644 --- a/fs/gfs2/export.c +++ b/fs/gfs2/export.c @@ -64,6 +64,7 @@ static int gfs2_encode_fh(struct inode *inode, __u32 *p, 
int *len, } struct get_name_filldir { + struct dir_context ctx; struct gfs2_inum_host inum; char *name; }; @@ -88,9 +89,11 @@ static int gfs2_get_name(struct dentry *parent, char *name, struct inode *dir = parent->d_inode; struct inode *inode = child->d_inode; struct gfs2_inode *dip, *ip; - struct get_name_filldir gnfd; + struct get_name_filldir gnfd = { + .ctx.actor = get_name_filldir, + .name = name + }; struct gfs2_holder gh; - u64 offset = 0; int error; struct file_ra_state f_ra = { .start = 0 }; @@ -106,13 +109,12 @@ static int gfs2_get_name(struct dentry *parent, char *name, *name = 0; gnfd.inum.no_addr = ip->i_no_addr; gnfd.inum.no_formal_ino = ip->i_no_formal_ino; - gnfd.name = name; error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &gh); if (error) return error; - error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir, &f_ra); + error = gfs2_dir_read(dir, &gnfd.ctx, &f_ra); gfs2_glock_dq_uninit(&gh); diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index ad0dc38d87ab..72c3866a7320 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -82,35 +82,28 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence) } /** - * gfs2_readdir - Read directory entries from a directory + * gfs2_readdir - Iterator for a directory * @file: The directory to read from - * @dirent: Buffer for dirents - * @filldir: Function used to do the copying + * @ctx: What to feed directory entries to * * Returns: errno */ -static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir) +static int gfs2_readdir(struct file *file, struct dir_context *ctx) { struct inode *dir = file->f_mapping->host; struct gfs2_inode *dip = GFS2_I(dir); struct gfs2_holder d_gh; - u64 offset = file->f_pos; int error; - gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); - error = gfs2_glock_nq(&d_gh); - if (error) { - gfs2_holder_uninit(&d_gh); + error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); + if (error) return error; - } - error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra); + error = gfs2_dir_read(dir, ctx, &file->f_ra); gfs2_glock_dq_uninit(&d_gh); - file->f_pos = offset; - return error; } @@ -538,21 +531,30 @@ static int gfs2_mmap(struct file *file, struct vm_area_struct *vma) } /** - * gfs2_open - open a file - * @inode: the inode to open - * @file: the struct file for this opening + * gfs2_open_common - This is common to open and atomic_open + * @inode: The inode being opened + * @file: The file being opened * - * Returns: errno + * This may be called under a glock or not depending upon how it has + * been called. We must always be called under a glock for regular + * files, however. For other file types, it does not matter whether + * we hold the glock or not. 
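
The get_name_filldir conversion above is the template this series uses wherever a filesystem needs private state during a directory walk: embed struct dir_context as the first member, point .actor at the callback, and hand the embedded context to the iterator. Because the context is the first member, the opaque pointer the actor receives is also a valid pointer to the containing structure. A condensed sketch, assuming the 3.11-era filldir_t actor signature; the example_* names are hypothetical:

    struct example_priv {
            struct dir_context ctx;         /* must be the first member */
            char *name;
    };

    static int example_actor(void *opaque, const char *name, int len,
                             loff_t pos, u64 ino, unsigned int d_type)
    {
            struct example_priv *priv = opaque;  /* same address as &priv->ctx */

            /* inspect or copy the entry here, e.g. into priv->name;
             * a non-zero return stops the walk */
            return 0;
    }

    static int example_walk(struct file *file, char *buf)
    {
            struct example_priv priv = {
                    .ctx.actor = example_actor,
                    .name = buf,
            };

            return iterate_dir(file, &priv.ctx);
    }
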
+ * + * Returns: Error code or 0 for success */ -static int gfs2_open(struct inode *inode, struct file *file) +int gfs2_open_common(struct inode *inode, struct file *file) { - struct gfs2_inode *ip = GFS2_I(inode); - struct gfs2_holder i_gh; struct gfs2_file *fp; - int error; + int ret; + + if (S_ISREG(inode->i_mode)) { + ret = generic_file_open(inode, file); + if (ret) + return ret; + } - fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL); + fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS); if (!fp) return -ENOMEM; @@ -560,29 +562,43 @@ static int gfs2_open(struct inode *inode, struct file *file) gfs2_assert_warn(GFS2_SB(inode), !file->private_data); file->private_data = fp; + return 0; +} + +/** + * gfs2_open - open a file + * @inode: the inode to open + * @file: the struct file for this opening + * + * After atomic_open, this function is only used for opening files + * which are already cached. We must still get the glock for regular + * files to ensure that we have the file size uptodate for the large + * file check which is in the common code. That is only an issue for + * regular files though. + * + * Returns: errno + */ + +static int gfs2_open(struct inode *inode, struct file *file) +{ + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_holder i_gh; + int error; + bool need_unlock = false; if (S_ISREG(ip->i_inode.i_mode)) { error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); if (error) - goto fail; + return error; + need_unlock = true; + } - if (!(file->f_flags & O_LARGEFILE) && - i_size_read(inode) > MAX_NON_LFS) { - error = -EOVERFLOW; - goto fail_gunlock; - } + error = gfs2_open_common(inode, file); + if (need_unlock) gfs2_glock_dq_uninit(&i_gh); - } - return 0; - -fail_gunlock: - gfs2_glock_dq_uninit(&i_gh); -fail: - file->private_data = NULL; - kfree(fp); return error; } @@ -896,7 +912,7 @@ out_uninit: * cluster; until we do, disable leases (by just returning -EINVAL), * unless the administrator has requested purely local locking. 
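
For orientation, the two gfs2 open paths after the split above, as a schematic rather than code from the patch:

    /*
     *  ->atomic_open()  -> __gfs2_lookup() or gfs2_create_inode()
     *                        -> finish_open(file, dentry,
     *                                       gfs2_open_common, opened)
     *
     *  ->open()         -> gfs2_open(): shared glock taken for regular
     *                        files -> gfs2_open_common()
     *
     * gfs2_open_common() performs the generic_file_open() O_LARGEFILE
     * check and allocates the per-file struct gfs2_file on both paths,
     * which is why gfs2_open() could drop its inline large-file check.
     */
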
* - * Locking: called under lock_flocks + * Locking: called under i_lock * * Returns: errno */ @@ -1048,7 +1064,7 @@ const struct file_operations gfs2_file_fops = { }; const struct file_operations gfs2_dir_fops = { - .readdir = gfs2_readdir, + .iterate = gfs2_readdir, .unlocked_ioctl = gfs2_ioctl, .open = gfs2_open, .release = gfs2_release, @@ -1078,7 +1094,7 @@ const struct file_operations gfs2_file_fops_nolock = { }; const struct file_operations gfs2_dir_fops_nolock = { - .readdir = gfs2_readdir, + .iterate = gfs2_readdir, .unlocked_ioctl = gfs2_ioctl, .open = gfs2_open, .release = gfs2_release, diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index c66e99c97571..5f2e5224c51c 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -54,7 +54,6 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) struct gfs2_bufdata *bd, *tmp; struct buffer_head *bh; const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock); - sector_t blocknr; gfs2_log_lock(sdp); spin_lock(&sdp->sd_ail_lock); @@ -65,13 +64,6 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) continue; gfs2_ail_error(gl, bh); } - blocknr = bh->b_blocknr; - bh->b_private = NULL; - gfs2_remove_from_ail(bd); /* drops ref on bh */ - - bd->bd_bh = NULL; - bd->bd_blkno = blocknr; - gfs2_trans_add_revoke(sdp, bd); } GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 62b484e4a9e4..bbb2715171cd 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -313,7 +313,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name, goto out; } - inode = gfs2_dir_search(dir, name); + inode = gfs2_dir_search(dir, name, false); if (IS_ERR(inode)) error = PTR_ERR(inode); out: @@ -346,17 +346,6 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name, if (!dip->i_inode.i_nlink) return -ENOENT; - error = gfs2_dir_check(&dip->i_inode, name, NULL); - switch (error) { - case -ENOENT: - error = 0; - break; - case 0: - return -EEXIST; - default: - return error; - } - if (dip->i_entries == (u32)-1) return -EFBIG; if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1) @@ -546,6 +535,7 @@ static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip, * gfs2_create_inode - Create a new inode * @dir: The parent directory * @dentry: The new dentry + * @file: If non-NULL, the file which is being opened * @mode: The permissions on the new inode * @dev: For device nodes, this is the device number * @symname: For symlinks, this is the link destination @@ -555,8 +545,9 @@ static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip, */ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, + struct file *file, umode_t mode, dev_t dev, const char *symname, - unsigned int size, int excl) + unsigned int size, int excl, int *opened) { const struct qstr *name = &dentry->d_name; struct gfs2_holder ghs[2]; @@ -564,6 +555,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, struct gfs2_inode *dip = GFS2_I(dir), *ip; struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); struct gfs2_glock *io_gl; + struct dentry *d; int error; u32 aflags = 0; int arq; @@ -584,15 +576,30 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, goto fail; error = create_ok(dip, name, mode); - if ((error == -EEXIST) && S_ISREG(mode) && !excl) { - inode = gfs2_lookupi(dir, &dentry->d_name, 0); - gfs2_glock_dq_uninit(ghs); - d_instantiate(dentry, inode); - return IS_ERR(inode) ? 
PTR_ERR(inode) : 0; - } if (error) goto fail_gunlock; + inode = gfs2_dir_search(dir, &dentry->d_name, !S_ISREG(mode) || excl); + error = PTR_ERR(inode); + if (!IS_ERR(inode)) { + d = d_splice_alias(inode, dentry); + error = 0; + if (file && !IS_ERR(d)) { + if (d == NULL) + d = dentry; + if (S_ISREG(inode->i_mode)) + error = finish_open(file, d, gfs2_open_common, opened); + else + error = finish_no_open(file, d); + } + gfs2_glock_dq_uninit(ghs); + if (IS_ERR(d)) + return PTR_RET(d); + return error; + } else if (error != -ENOENT) { + goto fail_gunlock; + } + arq = error = gfs2_diradd_alloc_required(dir, name); if (error < 0) goto fail_gunlock; @@ -686,10 +693,12 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, goto fail_gunlock3; mark_inode_dirty(inode); + d_instantiate(dentry, inode); + if (file) + error = finish_open(file, dentry, gfs2_open_common, opened); gfs2_glock_dq_uninit(ghs); gfs2_glock_dq_uninit(ghs + 1); - d_instantiate(dentry, inode); - return 0; + return error; fail_gunlock3: gfs2_glock_dq_uninit(ghs + 1); @@ -729,36 +738,56 @@ fail: static int gfs2_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { - return gfs2_create_inode(dir, dentry, S_IFREG | mode, 0, NULL, 0, excl); + return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl, NULL); } /** - * gfs2_lookup - Look up a filename in a directory and return its inode + * __gfs2_lookup - Look up a filename in a directory and return its inode * @dir: The directory inode * @dentry: The dentry of the new inode - * @nd: passed from Linux VFS, ignored by us + * @file: File to be opened + * @opened: atomic_open flags * - * Called by the VFS layer. Lock dir and call gfs2_lookupi() * * Returns: errno */ -static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) +static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry, + struct file *file, int *opened) { - struct inode *inode = gfs2_lookupi(dir, &dentry->d_name, 0); - if (inode && !IS_ERR(inode)) { - struct gfs2_glock *gl = GFS2_I(inode)->i_gl; - struct gfs2_holder gh; - int error; - error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); - if (error) { - iput(inode); - return ERR_PTR(error); - } - gfs2_glock_dq_uninit(&gh); + struct inode *inode; + struct dentry *d; + struct gfs2_holder gh; + struct gfs2_glock *gl; + int error; + + inode = gfs2_lookupi(dir, &dentry->d_name, 0); + if (!inode) + return NULL; + if (IS_ERR(inode)) + return ERR_CAST(inode); + + gl = GFS2_I(inode)->i_gl; + error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); + if (error) { + iput(inode); + return ERR_PTR(error); } - return d_splice_alias(inode, dentry); + + d = d_splice_alias(inode, dentry); + if (file && S_ISREG(inode->i_mode)) + error = finish_open(file, dentry, gfs2_open_common, opened); + + gfs2_glock_dq_uninit(&gh); + if (error) + return ERR_PTR(error); + return d; +} + +static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry, + unsigned flags) +{ + return __gfs2_lookup(dir, dentry, NULL, NULL); } /** @@ -1076,7 +1105,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry, if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode) - 1) return -ENAMETOOLONG; - return gfs2_create_inode(dir, dentry, S_IFLNK | S_IRWXUGO, 0, symname, size, 0); + return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0, NULL); } /** @@ -1092,7 +1121,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, 
umode_t mode) { struct gfs2_sbd *sdp = GFS2_SB(dir); unsigned dsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); - return gfs2_create_inode(dir, dentry, S_IFDIR | mode, 0, NULL, dsize, 0); + return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0, NULL); } /** @@ -1107,7 +1136,43 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { - return gfs2_create_inode(dir, dentry, mode, dev, NULL, 0, 0); + return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0, NULL); +} + +/** + * gfs2_atomic_open - Atomically open a file + * @dir: The directory + * @dentry: The proposed new entry + * @file: The proposed new struct file + * @flags: open flags + * @mode: File mode + * @opened: Flag to say whether the file has been opened or not + * + * Returns: error code or 0 for success + */ + +static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry, + struct file *file, unsigned flags, + umode_t mode, int *opened) +{ + struct dentry *d; + bool excl = !!(flags & O_EXCL); + + d = __gfs2_lookup(dir, dentry, file, opened); + if (IS_ERR(d)) + return PTR_ERR(d); + if (d == NULL) + d = dentry; + if (d->d_inode) { + if (!(*opened & FILE_OPENED)) + return finish_no_open(file, d); + return 0; + } + + if (!(flags & O_CREAT)) + return -ENOENT; + + return gfs2_create_inode(dir, dentry, file, S_IFREG | mode, 0, NULL, 0, excl, opened); } /* @@ -1787,6 +1852,7 @@ const struct inode_operations gfs2_dir_iops = { .removexattr = gfs2_removexattr, .fiemap = gfs2_fiemap, .get_acl = gfs2_get_acl, + .atomic_open = gfs2_atomic_open, }; const struct inode_operations gfs2_symlink_iops = { diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h index c53c7477f6da..ba4d9492d422 100644 --- a/fs/gfs2/inode.h +++ b/fs/gfs2/inode.h @@ -109,6 +109,7 @@ extern int gfs2_permission(struct inode *inode, int mask); extern int gfs2_setattr_simple(struct inode *inode, struct iattr *attr); extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name); extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf); +extern int gfs2_open_common(struct inode *inode, struct file *file); extern const struct inode_operations gfs2_file_iops; extern const struct inode_operations gfs2_dir_iops; diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index b404f4853034..610613fb65b5 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -211,15 +211,16 @@ static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr) static int gfs2_ail1_empty(struct gfs2_sbd *sdp) { struct gfs2_trans *tr, *s; + int oldest_tr = 1; int ret; spin_lock(&sdp->sd_ail_lock); list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) { gfs2_ail1_empty_one(sdp, tr); - if (list_empty(&tr->tr_ail1_list)) + if (list_empty(&tr->tr_ail1_list) && oldest_tr) list_move(&tr->tr_list, &sdp->sd_ail2_list); else - break; + oldest_tr = 0; } ret = list_empty(&sdp->sd_ail1_list); spin_unlock(&sdp->sd_ail_lock); @@ -317,7 +318,7 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail) int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks) { - unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize); + unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize); unsigned wanted = blks + reserved_blks; DEFINE_WAIT(wait); int did_wait = 0; @@ -545,6 +546,76 @@ void gfs2_ordered_del_inode(struct gfs2_inode *ip) spin_unlock(&sdp->sd_ordered_lock); } +void gfs2_add_revoke(struct gfs2_sbd *sdp, 
struct gfs2_bufdata *bd) +{ + struct buffer_head *bh = bd->bd_bh; + struct gfs2_glock *gl = bd->bd_gl; + + gfs2_remove_from_ail(bd); + bd->bd_bh = NULL; + bh->b_private = NULL; + bd->bd_blkno = bh->b_blocknr; + bd->bd_ops = &gfs2_revoke_lops; + sdp->sd_log_num_revoke++; + atomic_inc(&gl->gl_revokes); + set_bit(GLF_LFLUSH, &gl->gl_flags); + list_add(&bd->bd_list, &sdp->sd_log_le_revoke); +} + +void gfs2_write_revokes(struct gfs2_sbd *sdp) +{ + struct gfs2_trans *tr; + struct gfs2_bufdata *bd, *tmp; + int have_revokes = 0; + int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64); + + gfs2_ail1_empty(sdp); + spin_lock(&sdp->sd_ail_lock); + list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) { + list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) { + if (list_empty(&bd->bd_list)) { + have_revokes = 1; + goto done; + } + } + } +done: + spin_unlock(&sdp->sd_ail_lock); + if (have_revokes == 0) + return; + while (sdp->sd_log_num_revoke > max_revokes) + max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64); + max_revokes -= sdp->sd_log_num_revoke; + if (!sdp->sd_log_num_revoke) { + atomic_dec(&sdp->sd_log_blks_free); + /* If no blocks have been reserved, we need to also + * reserve a block for the header */ + if (!sdp->sd_log_blks_reserved) + atomic_dec(&sdp->sd_log_blks_free); + } + gfs2_log_lock(sdp); + spin_lock(&sdp->sd_ail_lock); + list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) { + list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) { + if (max_revokes == 0) + goto out_of_blocks; + if (!list_empty(&bd->bd_list)) + continue; + gfs2_add_revoke(sdp, bd); + max_revokes--; + } + } +out_of_blocks: + spin_unlock(&sdp->sd_ail_lock); + gfs2_log_unlock(sdp); + + if (!sdp->sd_log_num_revoke) { + atomic_inc(&sdp->sd_log_blks_free); + if (!sdp->sd_log_blks_reserved) + atomic_inc(&sdp->sd_log_blks_free); + } +} + /** * log_write_header - Get and initialize a journal header buffer * @sdp: The GFS2 superblock @@ -562,7 +633,6 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags) lh = page_address(page); clear_page(lh); - gfs2_ail1_empty(sdp); tail = current_tail(sdp); lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h index 3566f35915e0..37216634f0aa 100644 --- a/fs/gfs2/log.h +++ b/fs/gfs2/log.h @@ -72,5 +72,7 @@ extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc) extern void gfs2_log_shutdown(struct gfs2_sbd *sdp); extern void gfs2_meta_syncfs(struct gfs2_sbd *sdp); extern int gfs2_logd(void *data); +extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); +extern void gfs2_write_revokes(struct gfs2_sbd *sdp); #endif /* __LOG_DOT_H__ */ diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 6c33d7b6e0c4..17c5b5d7dc88 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -16,6 +16,7 @@ #include <linux/gfs2_ondisk.h> #include <linux/bio.h> #include <linux/fs.h> +#include <linux/list_sort.h> #include "gfs2.h" #include "incore.h" @@ -401,6 +402,20 @@ static void gfs2_check_magic(struct buffer_head *bh) kunmap_atomic(kaddr); } +static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b) +{ + struct gfs2_bufdata *bda, *bdb; + + bda = list_entry(a, struct gfs2_bufdata, bd_list); + bdb = list_entry(b, struct gfs2_bufdata, bd_list); + + if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr) + return -1; + if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr) + return 1; + return 0; +} + static void 
gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit, unsigned int total, struct list_head *blist, bool is_databuf) @@ -413,6 +428,7 @@ static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit, __be64 *ptr; gfs2_log_lock(sdp); + list_sort(NULL, blist, blocknr_cmp); bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list); while(total) { num = total; @@ -590,6 +606,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp) struct page *page; unsigned int length; + gfs2_write_revokes(sdp); if (!sdp->sd_log_num_revoke) return; @@ -836,10 +853,6 @@ const struct gfs2_log_operations gfs2_revoke_lops = { .lo_name = "revoke", }; -const struct gfs2_log_operations gfs2_rg_lops = { - .lo_name = "rg", -}; - const struct gfs2_log_operations gfs2_databuf_lops = { .lo_before_commit = databuf_lo_before_commit, .lo_after_commit = databuf_lo_after_commit, @@ -851,7 +864,6 @@ const struct gfs2_log_operations gfs2_databuf_lops = { const struct gfs2_log_operations *gfs2_log_ops[] = { &gfs2_databuf_lops, &gfs2_buf_lops, - &gfs2_rg_lops, &gfs2_revoke_lops, NULL, }; diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h index 87e062e05c92..9ca2e6438419 100644 --- a/fs/gfs2/lops.h +++ b/fs/gfs2/lops.h @@ -23,7 +23,6 @@ extern const struct gfs2_log_operations gfs2_glock_lops; extern const struct gfs2_log_operations gfs2_buf_lops; extern const struct gfs2_log_operations gfs2_revoke_lops; -extern const struct gfs2_log_operations gfs2_rg_lops; extern const struct gfs2_log_operations gfs2_databuf_lops; extern const struct gfs2_log_operations *gfs2_log_ops[]; diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 1a89afb68472..0da390686c08 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -296,10 +296,6 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int if (bd) { spin_lock(&sdp->sd_ail_lock); if (bd->bd_tr) { - gfs2_remove_from_ail(bd); - bh->b_private = NULL; - bd->bd_bh = NULL; - bd->bd_blkno = bh->b_blocknr; gfs2_trans_add_revoke(sdp, bd); } spin_unlock(&sdp->sd_ail_lock); diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 60ede2a0f43f..0262c190b6f9 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -916,16 +916,16 @@ static int init_threads(struct gfs2_sbd *sdp, int undo) goto fail_quotad; p = kthread_run(gfs2_logd, sdp, "gfs2_logd"); - error = IS_ERR(p); - if (error) { + if (IS_ERR(p)) { + error = PTR_ERR(p); fs_err(sdp, "can't start logd thread: %d\n", error); return error; } sdp->sd_logd_process = p; p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad"); - error = IS_ERR(p); - if (error) { + if (IS_ERR(p)) { + error = PTR_ERR(p); fs_err(sdp, "can't start quotad thread: %d\n", error); goto fail; } diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index c253b13722e8..3768c2f40e43 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -1154,11 +1154,6 @@ int gfs2_quota_sync(struct super_block *sb, int type) return error; } -static int gfs2_quota_sync_timeo(struct super_block *sb, int type) -{ - return gfs2_quota_sync(sb, type); -} - int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid) { struct gfs2_quota_data *qd; @@ -1414,7 +1409,7 @@ int gfs2_quotad(void *data) &tune->gt_statfs_quantum); /* Update quota file */ - quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t, + quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, &quotad_timeo, &tune->gt_quota_quantum); /* Check for & recover partially truncated inodes */ diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 9809156e3d04..69317435faa7 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c 
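
The list_sort() call added to gfs2_before_commit() above orders the pending log buffers by on-disk block number before they are written, so journal I/O is issued mostly sequentially. Note that blocknr_cmp() compares rather than subtracts: b_blocknr is a sector_t, and a difference could overflow the int return value. The same pattern as a self-contained sketch, with hypothetical example_* names:

    #include <linux/list_sort.h>

    struct example_buf {
            struct list_head list;
            sector_t blocknr;
    };

    static int example_cmp(void *priv, struct list_head *a, struct list_head *b)
    {
            struct example_buf *ea = list_entry(a, struct example_buf, list);
            struct example_buf *eb = list_entry(b, struct example_buf, list);

            /* compare, don't subtract: the values may not fit in an int */
            if (ea->blocknr < eb->blocknr)
                    return -1;
            if (ea->blocknr > eb->blocknr)
                    return 1;
            return 0;
    }

    /* usage: list_sort(NULL, &pending, example_cmp);  a stable
     * O(n log n) merge sort, safe for the long lists a big commit
     * can accumulate */
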
@@ -1288,13 +1288,15 @@ int gfs2_fitrim(struct file *filp, void __user *argp) minlen = max_t(u64, r.minlen, q->limits.discard_granularity) >> bs_shift; + if (end <= start || minlen > sdp->sd_max_rg_data) + return -EINVAL; + rgd = gfs2_blk2rgrpd(sdp, start, 0); - rgd_end = gfs2_blk2rgrpd(sdp, end - 1, 0); + rgd_end = gfs2_blk2rgrpd(sdp, end, 0); - if (end <= start || - minlen > sdp->sd_max_rg_data || - start > rgd_end->rd_data0 + rgd_end->rd_data) - return -EINVAL; + if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end)) + && (start > rgd_end->rd_data0 + rgd_end->rd_data)) + return -EINVAL; /* start is beyond the end of the fs */ while (1) { @@ -1336,7 +1338,7 @@ int gfs2_fitrim(struct file *filp, void __user *argp) } out: - r.len = trimmed << 9; + r.len = trimmed << bs_shift; if (copy_to_user(argp, &r, sizeof(r))) return -EFAULT; diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index 7374907742a8..2b20d7046bf3 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -270,19 +270,12 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) { - struct gfs2_glock *gl = bd->bd_gl; struct gfs2_trans *tr = current->journal_info; BUG_ON(!list_empty(&bd->bd_list)); - BUG_ON(!list_empty(&bd->bd_ail_st_list)); - BUG_ON(!list_empty(&bd->bd_ail_gl_list)); - bd->bd_ops = &gfs2_revoke_lops; + gfs2_add_revoke(sdp, bd); tr->tr_touched = 1; tr->tr_num_revoke++; - sdp->sd_log_num_revoke++; - atomic_inc(&gl->gl_revokes); - set_bit(GLF_LFLUSH, &gl->gl_flags); - list_add(&bd->bd_list, &sdp->sd_log_le_revoke); } void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len) diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c index e0101b6fb0d7..145566851e7a 100644 --- a/fs/hfs/dir.c +++ b/fs/hfs/dir.c @@ -51,9 +51,9 @@ done: /* * hfs_readdir */ -static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int hfs_readdir(struct file *file, struct dir_context *ctx) { - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; int len, err; char strbuf[HFS_MAX_NAMELEN]; @@ -62,7 +62,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir) struct hfs_readdir_data *rd; u16 type; - if (filp->f_pos >= inode->i_size) + if (ctx->pos >= inode->i_size) return 0; err = hfs_find_init(HFS_SB(sb)->cat_tree, &fd); @@ -73,14 +73,13 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (err) goto out; - switch ((u32)filp->f_pos) { - case 0: + if (ctx->pos == 0) { /* This is completely artificial... 
*/ - if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR)) + if (!dir_emit_dot(file, ctx)) goto out; - filp->f_pos++; - /* fall through */ - case 1: + ctx->pos = 1; + } + if (ctx->pos == 1) { if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { err = -EIO; goto out; @@ -97,18 +96,16 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir) // err = -EIO; // goto out; //} - if (filldir(dirent, "..", 2, 1, + if (!dir_emit(ctx, "..", 2, be32_to_cpu(entry.thread.ParID), DT_DIR)) goto out; - filp->f_pos++; - /* fall through */ - default: - if (filp->f_pos >= inode->i_size) - goto out; - err = hfs_brec_goto(&fd, filp->f_pos - 1); - if (err) - goto out; + ctx->pos = 2; } + if (ctx->pos >= inode->i_size) + goto out; + err = hfs_brec_goto(&fd, ctx->pos - 1); + if (err) + goto out; for (;;) { if (be32_to_cpu(fd.key->cat.ParID) != inode->i_ino) { @@ -131,7 +128,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir) err = -EIO; goto out; } - if (filldir(dirent, strbuf, len, filp->f_pos, + if (!dir_emit(ctx, strbuf, len, be32_to_cpu(entry.dir.DirID), DT_DIR)) break; } else if (type == HFS_CDR_FIL) { @@ -140,7 +137,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir) err = -EIO; goto out; } - if (filldir(dirent, strbuf, len, filp->f_pos, + if (!dir_emit(ctx, strbuf, len, be32_to_cpu(entry.file.FlNum), DT_REG)) break; } else { @@ -148,22 +145,22 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir) err = -EIO; goto out; } - filp->f_pos++; - if (filp->f_pos >= inode->i_size) + ctx->pos++; + if (ctx->pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, 1); if (err) goto out; } - rd = filp->private_data; + rd = file->private_data; if (!rd) { rd = kmalloc(sizeof(struct hfs_readdir_data), GFP_KERNEL); if (!rd) { err = -ENOMEM; goto out; } - filp->private_data = rd; - rd->file = filp; + file->private_data = rd; + rd->file = file; list_add(&rd->list, &HFS_I(inode)->open_dir_list); } memcpy(&rd->key, &fd.key, sizeof(struct hfs_cat_key)); @@ -306,7 +303,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry, const struct file_operations hfs_dir_operations = { .read = generic_read_dir, - .readdir = hfs_readdir, + .iterate = hfs_readdir, .llseek = generic_file_llseek, .release = hfs_dir_release, }; diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h index a73b11839a41..0524cda47a6e 100644 --- a/fs/hfs/hfs_fs.h +++ b/fs/hfs/hfs_fs.h @@ -229,13 +229,10 @@ extern int hfs_part_find(struct super_block *, sector_t *, sector_t *); /* string.c */ extern const struct dentry_operations hfs_dentry_operations; -extern int hfs_hash_dentry(const struct dentry *, const struct inode *, - struct qstr *); +extern int hfs_hash_dentry(const struct dentry *, struct qstr *); extern int hfs_strcmp(const unsigned char *, unsigned int, const unsigned char *, unsigned int); -extern int hfs_compare_dentry(const struct dentry *parent, - const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +extern int hfs_compare_dentry(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name); /* trans.c */ diff --git a/fs/hfs/string.c b/fs/hfs/string.c index 495a976a3cc9..85b610c3909f 100644 --- a/fs/hfs/string.c +++ b/fs/hfs/string.c @@ -51,8 +51,7 @@ static unsigned char caseorder[256] = { /* * Hash a string to an integer in a case-independent way */ -int hfs_hash_dentry(const struct dentry *dentry, const struct inode *inode, - struct qstr 
*this) +int hfs_hash_dentry(const struct dentry *dentry, struct qstr *this) { const unsigned char *name = this->name; unsigned int hash, len = this->len; @@ -93,8 +92,7 @@ int hfs_strcmp(const unsigned char *s1, unsigned int len1, * Test for equality of two strings in the HFS filename character ordering. * return 1 on failure and 0 on success */ -int hfs_compare_dentry(const struct dentry *parent, const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +int hfs_compare_dentry(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { const unsigned char *n1, *n2; diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index a37ac934732f..d8ce4bd17fc5 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c @@ -121,9 +121,9 @@ fail: return ERR_PTR(err); } -static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int hfsplus_readdir(struct file *file, struct dir_context *ctx) { - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; int len, err; char strbuf[HFSPLUS_MAX_STRLEN + 1]; @@ -132,7 +132,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) struct hfsplus_readdir_data *rd; u16 type; - if (filp->f_pos >= inode->i_size) + if (file->f_pos >= inode->i_size) return 0; err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); @@ -143,14 +143,13 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) if (err) goto out; - switch ((u32)filp->f_pos) { - case 0: + if (ctx->pos == 0) { /* This is completely artificial... */ - if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR)) + if (!dir_emit_dot(file, ctx)) goto out; - filp->f_pos++; - /* fall through */ - case 1: + ctx->pos = 1; + } + if (ctx->pos == 1) { if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { err = -EIO; goto out; @@ -168,19 +167,16 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) err = -EIO; goto out; } - if (filldir(dirent, "..", 2, 1, + if (!dir_emit(ctx, "..", 2, be32_to_cpu(entry.thread.parentID), DT_DIR)) goto out; - filp->f_pos++; - /* fall through */ - default: - if (filp->f_pos >= inode->i_size) - goto out; - err = hfs_brec_goto(&fd, filp->f_pos - 1); - if (err) - goto out; + ctx->pos = 2; } - + if (ctx->pos >= inode->i_size) + goto out; + err = hfs_brec_goto(&fd, ctx->pos - 1); + if (err) + goto out; for (;;) { if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) { pr_err("walked past end of dir\n"); @@ -211,7 +207,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) HFSPLUS_SB(sb)->hidden_dir->i_ino == be32_to_cpu(entry.folder.id)) goto next; - if (filldir(dirent, strbuf, len, filp->f_pos, + if (!dir_emit(ctx, strbuf, len, be32_to_cpu(entry.folder.id), DT_DIR)) break; } else if (type == HFSPLUS_FILE) { @@ -220,7 +216,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) err = -EIO; goto out; } - if (filldir(dirent, strbuf, len, filp->f_pos, + if (!dir_emit(ctx, strbuf, len, be32_to_cpu(entry.file.id), DT_REG)) break; } else { @@ -229,22 +225,22 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) goto out; } next: - filp->f_pos++; - if (filp->f_pos >= inode->i_size) + ctx->pos++; + if (ctx->pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, 1); if (err) goto out; } - rd = filp->private_data; + rd = file->private_data; if (!rd) { rd = kmalloc(sizeof(struct 
hfsplus_readdir_data), GFP_KERNEL); if (!rd) { err = -ENOMEM; goto out; } - filp->private_data = rd; - rd->file = filp; + file->private_data = rd; + rd->file = file; list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list); } memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key)); @@ -538,7 +534,7 @@ const struct inode_operations hfsplus_dir_inode_operations = { const struct file_operations hfsplus_dir_operations = { .fsync = hfsplus_file_fsync, .read = generic_read_dir, - .readdir = hfsplus_readdir, + .iterate = hfsplus_readdir, .unlocked_ioctl = hfsplus_ioctl, .llseek = generic_file_llseek, .release = hfsplus_dir_release, diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index 60b0a3388b26..ede79317cfb8 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h @@ -495,11 +495,8 @@ int hfsplus_uni2asc(struct super_block *, const struct hfsplus_unistr *, char *, int *); int hfsplus_asc2uni(struct super_block *, struct hfsplus_unistr *, int, const char *, int); -int hfsplus_hash_dentry(const struct dentry *dentry, - const struct inode *inode, struct qstr *str); -int hfsplus_compare_dentry(const struct dentry *parent, - const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str); +int hfsplus_compare_dentry(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name); /* wrapper.c */ diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c index 2c2e47dcfdd8..e8ef121a4d8b 100644 --- a/fs/hfsplus/unicode.c +++ b/fs/hfsplus/unicode.c @@ -334,8 +334,7 @@ int hfsplus_asc2uni(struct super_block *sb, * Composed unicode characters are decomposed and case-folding is performed * if the appropriate bits are (un)set on the superblock. */ -int hfsplus_hash_dentry(const struct dentry *dentry, const struct inode *inode, - struct qstr *str) +int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str) { struct super_block *sb = dentry->d_sb; const char *astr; @@ -386,9 +385,7 @@ int hfsplus_hash_dentry(const struct dentry *dentry, const struct inode *inode, * Composed unicode characters are decomposed and case-folding is performed * if the appropriate bits are (un)set on the superblock. 
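
The hfs and hfsplus hunks above (and the hpfs ones below) are one mechanical change repeated: ->d_hash() and ->d_compare() lose their inode arguments, which could be stale or NULL during RCU-walk lookups and which none of these filesystems actually used. After the change, the prototypes these converted implementations match are:

    int (*d_hash)(const struct dentry *dentry, struct qstr *str);
    int (*d_compare)(const struct dentry *parent, const struct dentry *dentry,
                     unsigned int len, const char *str,
                     const struct qstr *name);
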
*/ -int hfsplus_compare_dentry(const struct dentry *parent, - const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +int hfsplus_compare_dentry(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { struct super_block *sb = parent->d_sb; diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 32f35f187989..cddb05217512 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -277,7 +277,7 @@ static const struct super_operations hostfs_sbops = { .show_options = hostfs_show_options, }; -int hostfs_readdir(struct file *file, void *ent, filldir_t filldir) +int hostfs_readdir(struct file *file, struct dir_context *ctx) { void *dir; char *name; @@ -292,12 +292,11 @@ int hostfs_readdir(struct file *file, void *ent, filldir_t filldir) __putname(name); if (dir == NULL) return -error; - next = file->f_pos; + next = ctx->pos; while ((name = read_dir(dir, &next, &ino, &len, &type)) != NULL) { - error = (*filldir)(ent, name, len, file->f_pos, - ino, type); - if (error) break; - file->f_pos = next; + if (!dir_emit(ctx, name, len, ino, type)) + break; + ctx->pos = next; } close_dir(dir); return 0; @@ -393,7 +392,7 @@ static const struct file_operations hostfs_file_fops = { static const struct file_operations hostfs_dir_fops = { .llseek = generic_file_llseek, - .readdir = hostfs_readdir, + .iterate = hostfs_readdir, .read = generic_read_dir, }; diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c index f49d1498aa2e..4d0a1afa058c 100644 --- a/fs/hpfs/buffer.c +++ b/fs/hpfs/buffer.c @@ -7,8 +7,37 @@ */ #include <linux/sched.h> #include <linux/slab.h> +#include <linux/blkdev.h> #include "hpfs_fn.h" +void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n) +{ + struct buffer_head *bh; + struct blk_plug plug; + + if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size)) + return; + + bh = sb_find_get_block(s, secno); + if (bh) { + if (buffer_uptodate(bh)) { + brelse(bh); + return; + } + brelse(bh); + }; + + blk_start_plug(&plug); + while (n > 0) { + if (unlikely(secno >= hpfs_sb(s)->sb_fs_size)) + break; + sb_breadahead(s, secno); + secno++; + n--; + } + blk_finish_plug(&plug); +} + /* Map a sector into a buffer and return pointers to it and to the buffer. */ void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp, @@ -18,6 +47,8 @@ void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head hpfs_lock_assert(s); + hpfs_prefetch_sectors(s, secno, ahead); + cond_resched(); *bhp = bh = sb_bread(s, secno); @@ -67,6 +98,8 @@ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffe return NULL; } + hpfs_prefetch_sectors(s, secno, 4 + ahead); + qbh->data = data = kmalloc(2048, GFP_NOFS); if (!data) { printk("HPFS: hpfs_map_4sectors: out of memory\n"); diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c index 05d4816e4e77..fa27980f2229 100644 --- a/fs/hpfs/dentry.c +++ b/fs/hpfs/dentry.c @@ -12,8 +12,7 @@ * Note: the dentry argument is the parent dentry. 
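
hpfs_prefetch_sectors() above batches its readahead inside a block plug, so the individual sb_breadahead() calls can be merged into larger requests when the plug is released. The essential pattern, reduced to a sketch; example_prefetch is a hypothetical name:

    #include <linux/blkdev.h>
    #include <linux/buffer_head.h>

    static void example_prefetch(struct super_block *s, sector_t secno, int n)
    {
            struct blk_plug plug;

            blk_start_plug(&plug);          /* hold back submissions... */
            while (n-- > 0)
                    sb_breadahead(s, secno++);  /* async, does not block */
            blk_finish_plug(&plug);         /* ...issue them, merged, here */
    }
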
*/ -static int hpfs_hash_dentry(const struct dentry *dentry, const struct inode *inode, - struct qstr *qstr) +static int hpfs_hash_dentry(const struct dentry *dentry, struct qstr *qstr) { unsigned long hash; int i; @@ -35,9 +34,7 @@ static int hpfs_hash_dentry(const struct dentry *dentry, const struct inode *ino return 0; } -static int hpfs_compare_dentry(const struct dentry *parent, - const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +static int hpfs_compare_dentry(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { unsigned al = len; diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c index 834ac13c04b7..292b1acb9b81 100644 --- a/fs/hpfs/dir.c +++ b/fs/hpfs/dir.c @@ -57,14 +57,14 @@ fail: return -ESPIPE; } -static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int hpfs_readdir(struct file *file, struct dir_context *ctx) { - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); struct quad_buffer_head qbh; struct hpfs_dirent *de; int lc; - long old_pos; + loff_t next_pos; unsigned char *tempname; int c1, c2 = 0; int ret = 0; @@ -105,11 +105,11 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) } } lc = hpfs_sb(inode->i_sb)->sb_lowercase; - if (filp->f_pos == 12) { /* diff -r requires this (note, that diff -r */ - filp->f_pos = 13; /* also fails on msdos filesystem in 2.0) */ + if (ctx->pos == 12) { /* diff -r requires this (note, that diff -r */ + ctx->pos = 13; /* also fails on msdos filesystem in 2.0) */ goto out; } - if (filp->f_pos == 13) { + if (ctx->pos == 13) { ret = -ENOENT; goto out; } @@ -120,33 +120,34 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) accepted by filldir, but what can I do? 
maybe killall -9 ls helps */ if (hpfs_sb(inode->i_sb)->sb_chk) - if (hpfs_stop_cycles(inode->i_sb, filp->f_pos, &c1, &c2, "hpfs_readdir")) { + if (hpfs_stop_cycles(inode->i_sb, ctx->pos, &c1, &c2, "hpfs_readdir")) { ret = -EFSERROR; goto out; } - if (filp->f_pos == 12) + if (ctx->pos == 12) goto out; - if (filp->f_pos == 3 || filp->f_pos == 4 || filp->f_pos == 5) { - printk("HPFS: warning: pos==%d\n",(int)filp->f_pos); + if (ctx->pos == 3 || ctx->pos == 4 || ctx->pos == 5) { + printk("HPFS: warning: pos==%d\n",(int)ctx->pos); goto out; } - if (filp->f_pos == 0) { - if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) + if (ctx->pos == 0) { + if (!dir_emit_dot(file, ctx)) goto out; - filp->f_pos = 11; + ctx->pos = 11; } - if (filp->f_pos == 11) { - if (filldir(dirent, "..", 2, filp->f_pos, hpfs_inode->i_parent_dir, DT_DIR) < 0) + if (ctx->pos == 11) { + if (!dir_emit(ctx, "..", 2, hpfs_inode->i_parent_dir, DT_DIR)) goto out; - filp->f_pos = 1; + ctx->pos = 1; } - if (filp->f_pos == 1) { - filp->f_pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1; - hpfs_add_pos(inode, &filp->f_pos); - filp->f_version = inode->i_version; + if (ctx->pos == 1) { + ctx->pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1; + hpfs_add_pos(inode, &file->f_pos); + file->f_version = inode->i_version; } - old_pos = filp->f_pos; - if (!(de = map_pos_dirent(inode, &filp->f_pos, &qbh))) { + next_pos = ctx->pos; + if (!(de = map_pos_dirent(inode, &next_pos, &qbh))) { + ctx->pos = next_pos; ret = -EIOERROR; goto out; } @@ -154,20 +155,21 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (hpfs_sb(inode->i_sb)->sb_chk) { if (de->first && !de->last && (de->namelen != 2 || de ->name[0] != 1 || de->name[1] != 1)) - hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", old_pos); + hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", (unsigned long)ctx->pos); if (de->last && (de->namelen != 1 || de ->name[0] != 255)) - hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", old_pos); + hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", (unsigned long)ctx->pos); } hpfs_brelse4(&qbh); + ctx->pos = next_pos; goto again; } tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3); - if (filldir(dirent, tempname, de->namelen, old_pos, le32_to_cpu(de->fnode), DT_UNKNOWN) < 0) { - filp->f_pos = old_pos; + if (!dir_emit(ctx, tempname, de->namelen, le32_to_cpu(de->fnode), DT_UNKNOWN)) { if (tempname != de->name) kfree(tempname); hpfs_brelse4(&qbh); goto out; } + ctx->pos = next_pos; if (tempname != de->name) kfree(tempname); hpfs_brelse4(&qbh); } @@ -322,7 +324,7 @@ const struct file_operations hpfs_dir_ops = { .llseek = hpfs_dir_lseek, .read = generic_read_dir, - .readdir = hpfs_readdir, + .iterate = hpfs_readdir, .release = hpfs_dir_release, .fsync = hpfs_file_fsync, }; diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index e4ba5fe4c3b5..4e9dabcf1f4c 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -7,6 +7,7 @@ */ #include "hpfs_fn.h" +#include <linux/mpage.h> #define BLOCKS(size) (((size) + 511) >> 9) @@ -34,7 +35,7 @@ int hpfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) * so we must ignore such errors. 
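
The next_pos temporary threaded through hpfs_readdir() above encodes the position rule of the ->iterate() contract: compute the follow-on offset first, but store it into ctx->pos only after dir_emit() has accepted the entry, so that a full getdents buffer re-presents the same entry on the next call. Schematically, with a hypothetical example_dirent and next_entry() standing in for the filesystem's directory walk:

    struct example_dirent {
            const char *name;
            int namelen;
            u64 ino;
    };

    static int example_iterate(struct file *file, struct dir_context *ctx)
    {
            struct example_dirent *de;
            loff_t next_pos;

            while ((de = next_entry(file, ctx->pos, &next_pos)) != NULL) {
                    if (!dir_emit(ctx, de->name, de->namelen, de->ino,
                                  DT_UNKNOWN))
                            return 0;       /* ctx->pos still names this entry */
                    ctx->pos = next_pos;    /* commit only after emission */
            }
            return 0;
    }
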
*/ -static secno hpfs_bmap(struct inode *inode, unsigned file_secno) +static secno hpfs_bmap(struct inode *inode, unsigned file_secno, unsigned *n_secs) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); unsigned n, disk_secno; @@ -42,11 +43,20 @@ static secno hpfs_bmap(struct inode *inode, unsigned file_secno) struct buffer_head *bh; if (BLOCKS(hpfs_i(inode)->mmu_private) <= file_secno) return 0; n = file_secno - hpfs_inode->i_file_sec; - if (n < hpfs_inode->i_n_secs) return hpfs_inode->i_disk_sec + n; + if (n < hpfs_inode->i_n_secs) { + *n_secs = hpfs_inode->i_n_secs - n; + return hpfs_inode->i_disk_sec + n; + } if (!(fnode = hpfs_map_fnode(inode->i_sb, inode->i_ino, &bh))) return 0; disk_secno = hpfs_bplus_lookup(inode->i_sb, inode, &fnode->btree, file_secno, bh); if (disk_secno == -1) return 0; if (hpfs_chk_sectors(inode->i_sb, disk_secno, 1, "bmap")) return 0; + n = file_secno - hpfs_inode->i_file_sec; + if (n < hpfs_inode->i_n_secs) { + *n_secs = hpfs_inode->i_n_secs - n; + return hpfs_inode->i_disk_sec + n; + } + *n_secs = 1; return disk_secno; } @@ -67,10 +77,14 @@ static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_he { int r; secno s; + unsigned n_secs; hpfs_lock(inode->i_sb); - s = hpfs_bmap(inode, iblock); + s = hpfs_bmap(inode, iblock, &n_secs); if (s) { + if (bh_result->b_size >> 9 < n_secs) + n_secs = bh_result->b_size >> 9; map_bh(bh_result, inode->i_sb, s); + bh_result->b_size = n_secs << 9; goto ret_0; } if (!create) goto ret_0; @@ -95,14 +109,26 @@ static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_he return r; } +static int hpfs_readpage(struct file *file, struct page *page) +{ + return mpage_readpage(page, hpfs_get_block); +} + static int hpfs_writepage(struct page *page, struct writeback_control *wbc) { - return block_write_full_page(page,hpfs_get_block, wbc); + return block_write_full_page(page, hpfs_get_block, wbc); } -static int hpfs_readpage(struct file *file, struct page *page) +static int hpfs_readpages(struct file *file, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages) +{ + return mpage_readpages(mapping, pages, nr_pages, hpfs_get_block); +} + +static int hpfs_writepages(struct address_space *mapping, + struct writeback_control *wbc) { - return block_read_full_page(page,hpfs_get_block); + return mpage_writepages(mapping, wbc, hpfs_get_block); } static void hpfs_write_failed(struct address_space *mapping, loff_t to) @@ -161,6 +187,8 @@ static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations hpfs_aops = { .readpage = hpfs_readpage, .writepage = hpfs_writepage, + .readpages = hpfs_readpages, + .writepages = hpfs_writepages, .write_begin = hpfs_write_begin, .write_end = hpfs_write_end, .bmap = _hpfs_bmap diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index b7ae286646b5..1b398636e990 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -27,8 +27,9 @@ #define ALLOC_FWD_MAX 128 #define ALLOC_M 1 #define FNODE_RD_AHEAD 16 -#define ANODE_RD_AHEAD 16 -#define DNODE_RD_AHEAD 4 +#define ANODE_RD_AHEAD 0 +#define DNODE_RD_AHEAD 72 +#define COUNT_RD_AHEAD 62 #define FREE_DNODES_ADD 58 #define FREE_DNODES_DEL 29 @@ -207,6 +208,7 @@ void hpfs_remove_fnode(struct super_block *, fnode_secno fno); /* buffer.c */ +void hpfs_prefetch_sectors(struct super_block *, unsigned, int); void *hpfs_map_sector(struct super_block *, unsigned, struct buffer_head **, int); void *hpfs_get_sector(struct super_block *, unsigned, struct buffer_head 
**); void *hpfs_map_4sectors(struct super_block *, unsigned, struct quad_buffer_head *, int); @@ -271,6 +273,7 @@ void hpfs_evict_inode(struct inode *); __le32 *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *); __le32 *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *); +void hpfs_prefetch_bitmap(struct super_block *, unsigned); unsigned char *hpfs_load_code_page(struct super_block *, secno); __le32 *hpfs_load_bitmap_directory(struct super_block *, secno bmp); struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **); diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c index 4acb19d78359..3aa66ae1031e 100644 --- a/fs/hpfs/map.c +++ b/fs/hpfs/map.c @@ -17,7 +17,9 @@ __le32 *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block, struct quad_buffer_head *qbh, char *id) { secno sec; - if (hpfs_sb(s)->sb_chk) if (bmp_block * 16384 > hpfs_sb(s)->sb_fs_size) { + __le32 *ret; + unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14; + if (hpfs_sb(s)->sb_chk) if (bmp_block >= n_bands) { hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id); return NULL; } @@ -26,7 +28,23 @@ __le32 *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block, hpfs_error(s, "invalid bitmap block pointer %08x -> %08x at %s", bmp_block, sec, id); return NULL; } - return hpfs_map_4sectors(s, sec, qbh, 4); + ret = hpfs_map_4sectors(s, sec, qbh, 4); + if (ret) hpfs_prefetch_bitmap(s, bmp_block + 1); + return ret; +} + +void hpfs_prefetch_bitmap(struct super_block *s, unsigned bmp_block) +{ + unsigned to_prefetch, next_prefetch; + unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14; + if (unlikely(bmp_block >= n_bands)) + return; + to_prefetch = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]); + if (unlikely(bmp_block + 1 >= n_bands)) + next_prefetch = 0; + else + next_prefetch = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block + 1]); + hpfs_prefetch_sectors(s, to_prefetch, 4 + 4 * (to_prefetch + 4 == next_prefetch)); } /* diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index a0617e706957..4334cda8dba1 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -121,7 +121,7 @@ unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno) unsigned long *bits; unsigned count; - bits = hpfs_map_4sectors(s, secno, &qbh, 4); + bits = hpfs_map_4sectors(s, secno, &qbh, 0); if (!bits) return 0; count = bitmap_weight(bits, 2048 * BITS_PER_BYTE); @@ -134,8 +134,13 @@ static unsigned count_bitmaps(struct super_block *s) unsigned n, count, n_bands; n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14; count = 0; - for (n = 0; n < n_bands; n++) + for (n = 0; n < COUNT_RD_AHEAD; n++) { + hpfs_prefetch_bitmap(s, n); + } + for (n = 0; n < n_bands; n++) { + hpfs_prefetch_bitmap(s, n + COUNT_RD_AHEAD); count += hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n])); + } return count; } @@ -558,7 +563,13 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) sbi->sb_cp_table = NULL; sbi->sb_c_bitmap = -1; sbi->sb_max_fwd_alloc = 0xffffff; - + + if (sbi->sb_fs_size >= 0x80000000) { + hpfs_error(s, "invalid size in superblock: %08x", + (unsigned)sbi->sb_fs_size); + goto bail4; + } + /* Load bitmap directory */ if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps)))) goto bail4; diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c index cd3e38972c86..4338ff32959d 100644 --- a/fs/hppfs/hppfs.c +++ b/fs/hppfs/hppfs.c @@ -69,7 +69,7 @@ static char *dentry_name(struct dentry 
*dentry, int extra) struct dentry *parent; char *root, *name; const char *seg_name; - int len, seg_len; + int len, seg_len, root_len; len = 0; parent = dentry; @@ -81,7 +81,8 @@ static char *dentry_name(struct dentry *dentry, int extra) } root = "proc"; - len += strlen(root); + root_len = strlen(root); + len += root_len; name = kmalloc(len + extra + 1, GFP_KERNEL); if (name == NULL) return NULL; @@ -91,7 +92,7 @@ static char *dentry_name(struct dentry *dentry, int extra) while (parent->d_parent != parent) { if (is_pid(parent)) { seg_name = "pid"; - seg_len = strlen("pid"); + seg_len = strlen(seg_name); } else { seg_name = parent->d_name.name; @@ -100,10 +101,10 @@ static char *dentry_name(struct dentry *dentry, int extra) len -= seg_len + 1; name[len] = '/'; - strncpy(&name[len + 1], seg_name, seg_len); + memcpy(&name[len + 1], seg_name, seg_len); parent = parent->d_parent; } - strncpy(name, root, strlen(root)); + memcpy(name, root, root_len); return name; } @@ -542,8 +543,8 @@ static const struct file_operations hppfs_file_fops = { }; struct hppfs_dirent { - void *vfs_dirent; - filldir_t filldir; + struct dir_context ctx; + struct dir_context *caller; struct dentry *dentry; }; @@ -555,34 +556,29 @@ static int hppfs_filldir(void *d, const char *name, int size, if (file_removed(dirent->dentry, name)) return 0; - return (*dirent->filldir)(dirent->vfs_dirent, name, size, offset, - inode, type); + dirent->caller->pos = dirent->ctx.pos; + return !dir_emit(dirent->caller, name, size, inode, type); } -static int hppfs_readdir(struct file *file, void *ent, filldir_t filldir) +static int hppfs_readdir(struct file *file, struct dir_context *ctx) { struct hppfs_private *data = file->private_data; struct file *proc_file = data->proc_file; - int (*readdir)(struct file *, void *, filldir_t); - struct hppfs_dirent dirent = ((struct hppfs_dirent) - { .vfs_dirent = ent, - .filldir = filldir, - .dentry = file->f_path.dentry - }); + struct hppfs_dirent d = { + .ctx.actor = hppfs_filldir, + .caller = ctx, + .dentry = file->f_path.dentry + }; int err; - - readdir = file_inode(proc_file)->i_fop->readdir; - - proc_file->f_pos = file->f_pos; - err = (*readdir)(proc_file, &dirent, hppfs_filldir); - file->f_pos = proc_file->f_pos; - + proc_file->f_pos = ctx->pos; + err = iterate_dir(proc_file, &d.ctx); + ctx->pos = d.ctx.pos; return err; } static const struct file_operations hppfs_dir_fops = { .owner = NULL, - .readdir = hppfs_readdir, + .iterate = hppfs_readdir, .open = hppfs_dir_open, .llseek = default_llseek, .release = hppfs_release, diff --git a/fs/inode.c b/fs/inode.c index 00d5fc3b86e1..d6dfb09c8280 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -333,8 +333,10 @@ EXPORT_SYMBOL(set_nlink); */ void inc_nlink(struct inode *inode) { - if (WARN_ON(inode->i_nlink == 0)) + if (unlikely(inode->i_nlink == 0)) { + WARN_ON(!(inode->i_state & I_LINKABLE)); atomic_long_dec(&inode->i_sb->s_remove_count); + } inode->__i_nlink++; } diff --git a/fs/internal.h b/fs/internal.h index eaa75f75b625..7c5f01cf619d 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -96,11 +96,12 @@ struct open_flags { umode_t mode; int acc_mode; int intent; + int lookup_flags; }; extern struct file *do_filp_open(int dfd, struct filename *pathname, - const struct open_flags *op, int flags); + const struct open_flags *op); extern struct file *do_file_open_root(struct dentry *, struct vfsmount *, - const char *, const struct open_flags *, int lookup_flags); + const char *, const struct open_flags *); extern long do_handle_open(int mountdirfd, struct 
file_handle __user *ufh, int open_flag); @@ -130,6 +131,13 @@ extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); * read_write.c */ extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *); +extern int rw_verify_area(int, struct file *, const loff_t *, size_t); + +/* + * splice.c + */ +extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, + loff_t *opos, size_t len, unsigned int flags); /* * pipe.c diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c index a7d5c3c3d4e6..b943cbd963bb 100644 --- a/fs/isofs/dir.c +++ b/fs/isofs/dir.c @@ -78,8 +78,8 @@ int get_acorn_filename(struct iso_directory_record *de, /* * This should _really_ be cleaned up some day.. */ -static int do_isofs_readdir(struct inode *inode, struct file *filp, - void *dirent, filldir_t filldir, +static int do_isofs_readdir(struct inode *inode, struct file *file, + struct dir_context *ctx, char *tmpname, struct iso_directory_record *tmpde) { unsigned long bufsize = ISOFS_BUFFER_SIZE(inode); @@ -94,10 +94,10 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp, struct iso_directory_record *de; struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb); - offset = filp->f_pos & (bufsize - 1); - block = filp->f_pos >> bufbits; + offset = ctx->pos & (bufsize - 1); + block = ctx->pos >> bufbits; - while (filp->f_pos < inode->i_size) { + while (ctx->pos < inode->i_size) { int de_len; if (!bh) { @@ -108,7 +108,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp, de = (struct iso_directory_record *) (bh->b_data + offset); - de_len = *(unsigned char *) de; + de_len = *(unsigned char *)de; /* * If the length byte is zero, we should move on to the next @@ -119,8 +119,8 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp, if (de_len == 0) { brelse(bh); bh = NULL; - filp->f_pos = (filp->f_pos + ISOFS_BLOCK_SIZE) & ~(ISOFS_BLOCK_SIZE - 1); - block = filp->f_pos >> bufbits; + ctx->pos = (ctx->pos + ISOFS_BLOCK_SIZE) & ~(ISOFS_BLOCK_SIZE - 1); + block = ctx->pos >> bufbits; offset = 0; continue; } @@ -164,16 +164,16 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp, if (de->flags[-sbi->s_high_sierra] & 0x80) { first_de = 0; - filp->f_pos += de_len; + ctx->pos += de_len; continue; } first_de = 1; /* Handle the case of the '.' directory */ if (de->name_len[0] == 1 && de->name[0] == 0) { - if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) + if (!dir_emit_dot(file, ctx)) break; - filp->f_pos += de_len; + ctx->pos += de_len; continue; } @@ -181,10 +181,9 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp, /* Handle the case of the '..' 
directory */ if (de->name_len[0] == 1 && de->name[0] == 1) { - inode_number = parent_ino(filp->f_path.dentry); - if (filldir(dirent, "..", 2, filp->f_pos, inode_number, DT_DIR) < 0) + if (!dir_emit_dotdot(file, ctx)) break; - filp->f_pos += de_len; + ctx->pos += de_len; continue; } @@ -198,7 +197,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp, if ((sbi->s_hide && (de->flags[-sbi->s_high_sierra] & 1)) || (!sbi->s_showassoc && (de->flags[-sbi->s_high_sierra] & 4))) { - filp->f_pos += de_len; + ctx->pos += de_len; continue; } @@ -230,10 +229,10 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp, } } if (len > 0) { - if (filldir(dirent, p, len, filp->f_pos, inode_number, DT_UNKNOWN) < 0) + if (!dir_emit(ctx, p, len, inode_number, DT_UNKNOWN)) break; } - filp->f_pos += de_len; + ctx->pos += de_len; continue; } @@ -247,13 +246,12 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp, * handling split directory entries.. The real work is done by * "do_isofs_readdir()". */ -static int isofs_readdir(struct file *filp, - void *dirent, filldir_t filldir) +static int isofs_readdir(struct file *file, struct dir_context *ctx) { int result; char *tmpname; struct iso_directory_record *tmpde; - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); tmpname = (char *)__get_free_page(GFP_KERNEL); if (tmpname == NULL) @@ -261,7 +259,7 @@ static int isofs_readdir(struct file *filp, tmpde = (struct iso_directory_record *) (tmpname+1024); - result = do_isofs_readdir(inode, filp, dirent, filldir, tmpname, tmpde); + result = do_isofs_readdir(inode, file, ctx, tmpname, tmpde); free_page((unsigned long) tmpname); return result; @@ -271,7 +269,7 @@ const struct file_operations isofs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = isofs_readdir, + .iterate = isofs_readdir, }; /* diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index d9b8aebdeb22..c348d6d88624 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -28,31 +28,23 @@ #define BEQUIET -static int isofs_hashi(const struct dentry *parent, const struct inode *inode, - struct qstr *qstr); -static int isofs_hash(const struct dentry *parent, const struct inode *inode, - struct qstr *qstr); +static int isofs_hashi(const struct dentry *parent, struct qstr *qstr); +static int isofs_hash(const struct dentry *parent, struct qstr *qstr); static int isofs_dentry_cmpi(const struct dentry *parent, - const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, + const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name); static int isofs_dentry_cmp(const struct dentry *parent, - const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, + const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name); #ifdef CONFIG_JOLIET -static int isofs_hashi_ms(const struct dentry *parent, const struct inode *inode, - struct qstr *qstr); -static int isofs_hash_ms(const struct dentry *parent, const struct inode *inode, - struct qstr *qstr); +static int isofs_hashi_ms(const struct dentry *parent, struct qstr *qstr); +static int isofs_hash_ms(const struct dentry *parent, struct qstr *qstr); static int isofs_dentry_cmpi_ms(const struct dentry *parent, - const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, + const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name); static int 
isofs_dentry_cmp_ms(const struct dentry *parent, - const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, + const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name); #endif @@ -265,30 +257,26 @@ static int isofs_dentry_cmp_common( } static int -isofs_hash(const struct dentry *dentry, const struct inode *inode, - struct qstr *qstr) +isofs_hash(const struct dentry *dentry, struct qstr *qstr) { return isofs_hash_common(dentry, qstr, 0); } static int -isofs_hashi(const struct dentry *dentry, const struct inode *inode, - struct qstr *qstr) +isofs_hashi(const struct dentry *dentry, struct qstr *qstr) { return isofs_hashi_common(dentry, qstr, 0); } static int -isofs_dentry_cmp(const struct dentry *parent, const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +isofs_dentry_cmp(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { return isofs_dentry_cmp_common(len, str, name, 0, 0); } static int -isofs_dentry_cmpi(const struct dentry *parent, const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { return isofs_dentry_cmp_common(len, str, name, 0, 1); @@ -296,30 +284,26 @@ isofs_dentry_cmpi(const struct dentry *parent, const struct inode *pinode, #ifdef CONFIG_JOLIET static int -isofs_hash_ms(const struct dentry *dentry, const struct inode *inode, - struct qstr *qstr) +isofs_hash_ms(const struct dentry *dentry, struct qstr *qstr) { return isofs_hash_common(dentry, qstr, 1); } static int -isofs_hashi_ms(const struct dentry *dentry, const struct inode *inode, - struct qstr *qstr) +isofs_hashi_ms(const struct dentry *dentry, struct qstr *qstr) { return isofs_hashi_common(dentry, qstr, 1); } static int -isofs_dentry_cmp_ms(const struct dentry *parent, const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +isofs_dentry_cmp_ms(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { return isofs_dentry_cmp_common(len, str, name, 1, 0); } static int -isofs_dentry_cmpi_ms(const struct dentry *parent, const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +isofs_dentry_cmpi_ms(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { return isofs_dentry_cmp_common(len, str, name, 1, 1); diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c index c167028844ed..95295640d9c8 100644 --- a/fs/isofs/namei.c +++ b/fs/isofs/namei.c @@ -37,8 +37,7 @@ isofs_cmp(struct dentry *dentry, const char *compare, int dlen) qstr.name = compare; qstr.len = dlen; - return dentry->d_op->d_compare(NULL, NULL, NULL, NULL, - dentry->d_name.len, dentry->d_name.name, &qstr); + return dentry->d_op->d_compare(NULL, NULL, dentry->d_name.len, dentry->d_name.name, &qstr); } /* diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index e3e255c0a509..be0c39b66fe0 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -2019,16 +2019,20 @@ zap_buffer_unlocked: * void journal_invalidatepage() - invalidate a journal page * @journal: journal to use for flush * @page: page to flush - * @offset: length of page to invalidate. 
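The isofs hunks above follow a parallel VFS change that drops the inode arguments from ->d_hash and ->d_compare. A sketch of a case-insensitive comparator against the slimmed prototype (hypothetical examplefs, not from this patch; d_compare returns 0 on a match, non-zero otherwise):

#include <linux/dcache.h>
#include <linux/ctype.h>

static int examplefs_d_compare(const struct dentry *parent,
			       const struct dentry *dentry,
			       unsigned int len, const char *str,
			       const struct qstr *name)
{
	unsigned int i;

	if (len != name->len)
		return 1;			/* non-zero: no match */
	for (i = 0; i < len; i++)
		if (tolower(str[i]) != tolower(name->name[i]))
			return 1;
	return 0;				/* 0: names match */
}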
+ * @offset: offset of the range to invalidate + * @length: length of the range to invalidate * - * Reap page buffers containing data after offset in page. + * Reap page buffers containing data in specified range in page. */ void journal_invalidatepage(journal_t *journal, struct page *page, - unsigned long offset) + unsigned int offset, + unsigned int length) { struct buffer_head *head, *bh, *next; + unsigned int stop = offset + length; unsigned int curr_off = 0; + int partial_page = (offset || length < PAGE_CACHE_SIZE); int may_free = 1; if (!PageLocked(page)) @@ -2036,6 +2040,8 @@ void journal_invalidatepage(journal_t *journal, if (!page_has_buffers(page)) return; + BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); + /* We will potentially be playing with lists other than just the * data lists (especially for journaled data mode), so be * cautious in our locking. */ @@ -2045,11 +2051,14 @@ void journal_invalidatepage(journal_t *journal, unsigned int next_off = curr_off + bh->b_size; next = bh->b_this_page; + if (next_off > stop) + return; + if (offset <= curr_off) { /* This block is wholly outside the truncation point */ lock_buffer(bh); may_free &= journal_unmap_buffer(journal, bh, - offset > 0); + partial_page); unlock_buffer(bh); } curr_off = next_off; @@ -2057,7 +2066,7 @@ void journal_invalidatepage(journal_t *journal, } while (bh != head); - if (!offset) { + if (!partial_page) { if (may_free && try_to_free_buffers(page)) J_ASSERT(!page_has_buffers(page)); } diff --git a/fs/jbd2/Kconfig b/fs/jbd2/Kconfig index 69a48c2944da..5a9f5534d57b 100644 --- a/fs/jbd2/Kconfig +++ b/fs/jbd2/Kconfig @@ -20,7 +20,7 @@ config JBD2 config JBD2_DEBUG bool "JBD2 (ext4) debugging support" - depends on JBD2 && DEBUG_FS + depends on JBD2 help If you are using the ext4 journaled file system (or potentially any other filesystem/device using JBD2), this option @@ -29,7 +29,7 @@ config JBD2_DEBUG By default, the debugging output will be turned off. If you select Y here, then you will be able to turn on debugging - with "echo N > /sys/kernel/debug/jbd2/jbd2-debug", where N is a + with "echo N > /sys/module/jbd2/parameters/jbd2_debug", where N is a number between 1 and 5. The higher the number, the more debugging output is generated. To turn debugging off again, do - "echo 0 > /sys/kernel/debug/jbd2/jbd2-debug". + "echo 0 > /sys/module/jbd2/parameters/jbd2_debug". diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index c78841ee81cf..7f34f4716165 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -120,8 +120,8 @@ void __jbd2_log_wait_for_space(journal_t *journal) int nblocks, space_left; /* assert_spin_locked(&journal->j_state_lock); */ - nblocks = jbd_space_needed(journal); - while (__jbd2_log_space_left(journal) < nblocks) { + nblocks = jbd2_space_needed(journal); + while (jbd2_log_space_left(journal) < nblocks) { if (journal->j_flags & JBD2_ABORT) return; write_unlock(&journal->j_state_lock); @@ -140,8 +140,8 @@ void __jbd2_log_wait_for_space(journal_t *journal) */ write_lock(&journal->j_state_lock); spin_lock(&journal->j_list_lock); - nblocks = jbd_space_needed(journal); - space_left = __jbd2_log_space_left(journal); + nblocks = jbd2_space_needed(journal); + space_left = jbd2_log_space_left(journal); if (space_left < nblocks) { int chkpt = journal->j_checkpoint_transactions != NULL; tid_t tid = 0; @@ -156,7 +156,15 @@ void __jbd2_log_wait_for_space(journal_t *journal) /* We were able to recover space; yay! 
*/ ; } else if (tid) { + /* + * jbd2_journal_commit_transaction() may want + * to take the checkpoint_mutex if JBD2_FLUSHED + * is set. So we need to temporarily drop it. + */ + mutex_unlock(&journal->j_checkpoint_mutex); jbd2_log_wait_commit(journal, tid); + write_lock(&journal->j_state_lock); + continue; } else { printk(KERN_ERR "%s: needed %d blocks and " "only had %d space available\n", @@ -625,10 +633,6 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh) __jbd2_journal_drop_transaction(journal, transaction); jbd2_journal_free_transaction(transaction); - - /* Just in case anybody was waiting for more transactions to be - checkpointed... */ - wake_up(&journal->j_wait_logspace); ret = 1; out: return ret; @@ -690,9 +694,7 @@ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transact J_ASSERT(transaction->t_state == T_FINISHED); J_ASSERT(transaction->t_buffers == NULL); J_ASSERT(transaction->t_forget == NULL); - J_ASSERT(transaction->t_iobuf_list == NULL); J_ASSERT(transaction->t_shadow_list == NULL); - J_ASSERT(transaction->t_log_list == NULL); J_ASSERT(transaction->t_checkpoint_list == NULL); J_ASSERT(transaction->t_checkpoint_io_list == NULL); J_ASSERT(atomic_read(&transaction->t_updates) == 0); diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 0f53946f13c1..559bec1a37b4 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -30,15 +30,22 @@ #include <trace/events/jbd2.h> /* - * Default IO end handler for temporary BJ_IO buffer_heads. + * IO end handler for temporary buffer_heads handling writes to the journal. */ static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate) { + struct buffer_head *orig_bh = bh->b_private; + BUFFER_TRACE(bh, ""); if (uptodate) set_buffer_uptodate(bh); else clear_buffer_uptodate(bh); + if (orig_bh) { + clear_bit_unlock(BH_Shadow, &orig_bh->b_state); + smp_mb__after_clear_bit(); + wake_up_bit(&orig_bh->b_state, BH_Shadow); + } unlock_buffer(bh); } @@ -85,8 +92,7 @@ nope: __brelse(bh); } -static void jbd2_commit_block_csum_set(journal_t *j, - struct journal_head *descriptor) +static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh) { struct commit_header *h; __u32 csum; @@ -94,12 +100,11 @@ static void jbd2_commit_block_csum_set(journal_t *j, if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) return; - h = (struct commit_header *)(jh2bh(descriptor)->b_data); + h = (struct commit_header *)(bh->b_data); h->h_chksum_type = 0; h->h_chksum_size = 0; h->h_chksum[0] = 0; - csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data, - j->j_blocksize); + csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize); h->h_chksum[0] = cpu_to_be32(csum); } @@ -116,7 +121,6 @@ static int journal_submit_commit_record(journal_t *journal, struct buffer_head **cbh, __u32 crc32_sum) { - struct journal_head *descriptor; struct commit_header *tmp; struct buffer_head *bh; int ret; @@ -127,12 +131,10 @@ static int journal_submit_commit_record(journal_t *journal, if (is_journal_aborted(journal)) return 0; - descriptor = jbd2_journal_get_descriptor_buffer(journal); - if (!descriptor) + bh = jbd2_journal_get_descriptor_buffer(journal); + if (!bh) return 1; - bh = jh2bh(descriptor); - tmp = (struct commit_header *)bh->b_data; tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER); tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK); @@ -146,9 +148,9 @@ static int journal_submit_commit_record(journal_t *journal, tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE; tmp->h_chksum[0] = 
cpu_to_be32(crc32_sum); } - jbd2_commit_block_csum_set(journal, descriptor); + jbd2_commit_block_csum_set(journal, bh); - JBUFFER_TRACE(descriptor, "submit commit block"); + BUFFER_TRACE(bh, "submit commit block"); lock_buffer(bh); clear_buffer_dirty(bh); set_buffer_uptodate(bh); @@ -180,7 +182,6 @@ static int journal_wait_on_commit_record(journal_t *journal, if (unlikely(!buffer_uptodate(bh))) ret = -EIO; put_bh(bh); /* One for getblk() */ - jbd2_journal_put_journal_head(bh2jh(bh)); return ret; } @@ -321,7 +322,7 @@ static void write_tag_block(int tag_bytes, journal_block_tag_t *tag, } static void jbd2_descr_block_csum_set(journal_t *j, - struct journal_head *descriptor) + struct buffer_head *bh) { struct jbd2_journal_block_tail *tail; __u32 csum; @@ -329,12 +330,10 @@ static void jbd2_descr_block_csum_set(journal_t *j, if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) return; - tail = (struct jbd2_journal_block_tail *) - (jh2bh(descriptor)->b_data + j->j_blocksize - + tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize - sizeof(struct jbd2_journal_block_tail)); tail->t_checksum = 0; - csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data, - j->j_blocksize); + csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize); tail->t_checksum = cpu_to_be32(csum); } @@ -343,20 +342,21 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag, { struct page *page = bh->b_page; __u8 *addr; - __u32 csum; + __u32 csum32; if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) return; sequence = cpu_to_be32(sequence); addr = kmap_atomic(page); - csum = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence, - sizeof(sequence)); - csum = jbd2_chksum(j, csum, addr + offset_in_page(bh->b_data), - bh->b_size); + csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence, + sizeof(sequence)); + csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data), + bh->b_size); kunmap_atomic(addr); - tag->t_checksum = cpu_to_be32(csum); + /* We only have space to store the lower 16 bits of the crc32c. 
*/ + tag->t_checksum = cpu_to_be16(csum32); } /* * jbd2_journal_commit_transaction @@ -368,7 +368,8 @@ void jbd2_journal_commit_transaction(journal_t *journal) { struct transaction_stats_s stats; transaction_t *commit_transaction; - struct journal_head *jh, *new_jh, *descriptor; + struct journal_head *jh; + struct buffer_head *descriptor; struct buffer_head **wbuf = journal->j_wbuf; int bufs; int flags; @@ -392,6 +393,8 @@ void jbd2_journal_commit_transaction(journal_t *journal) tid_t first_tid; int update_tail; int csum_size = 0; + LIST_HEAD(io_bufs); + LIST_HEAD(log_bufs); if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) csum_size = sizeof(struct jbd2_journal_block_tail); @@ -424,13 +427,13 @@ void jbd2_journal_commit_transaction(journal_t *journal) J_ASSERT(journal->j_committing_transaction == NULL); commit_transaction = journal->j_running_transaction; - J_ASSERT(commit_transaction->t_state == T_RUNNING); trace_jbd2_start_commit(journal, commit_transaction); jbd_debug(1, "JBD2: starting commit of transaction %d\n", commit_transaction->t_tid); write_lock(&journal->j_state_lock); + J_ASSERT(commit_transaction->t_state == T_RUNNING); commit_transaction->t_state = T_LOCKED; trace_jbd2_commit_locking(journal, commit_transaction); @@ -520,6 +523,12 @@ void jbd2_journal_commit_transaction(journal_t *journal) */ jbd2_journal_switch_revoke_table(journal); + /* + * Reserved credits cannot be claimed anymore, free them + */ + atomic_sub(atomic_read(&journal->j_reserved_credits), + &commit_transaction->t_outstanding_credits); + trace_jbd2_commit_flushing(journal, commit_transaction); stats.run.rs_flushing = jiffies; stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked, @@ -533,7 +542,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) wake_up(&journal->j_wait_transaction_locked); write_unlock(&journal->j_state_lock); - jbd_debug(3, "JBD2: commit phase 2\n"); + jbd_debug(3, "JBD2: commit phase 2a\n"); /* * Now start flushing things to disk, in the order they appear @@ -545,10 +554,10 @@ void jbd2_journal_commit_transaction(journal_t *journal) blk_start_plug(&plug); jbd2_journal_write_revoke_records(journal, commit_transaction, - WRITE_SYNC); + &log_bufs, WRITE_SYNC); blk_finish_plug(&plug); - jbd_debug(3, "JBD2: commit phase 2\n"); + jbd_debug(3, "JBD2: commit phase 2b\n"); /* * Way to go: we have now written out all of the data for a @@ -571,8 +580,8 @@ void jbd2_journal_commit_transaction(journal_t *journal) atomic_read(&commit_transaction->t_outstanding_credits)); err = 0; - descriptor = NULL; bufs = 0; + descriptor = NULL; blk_start_plug(&plug); while (commit_transaction->t_buffers) { @@ -604,8 +613,6 @@ void jbd2_journal_commit_transaction(journal_t *journal) record the metadata buffer. 
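The csum → csum32 rename above is about width: journal_block_tag_t carries only a __be16 t_checksum, so the computed crc32c is truncated to its low 16 bits on disk. A sketch of the matching read side, which must truncate the recomputed value the same way (the fs/jbd2/recovery.c hunk later in this diff does exactly this; the example_ name is hypothetical):

/* csum32 is the full crc32c over the sequence number and the block data;
 * only its low 16 bits survive in the on-disk tag, so the reader applies
 * the same truncation before comparing (cpu_to_be16() narrows the value) */
static int example_tag_csum_ok(journal_block_tag_t *tag, __u32 csum32)
{
	return tag->t_checksum == cpu_to_be16(csum32);
}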
*/ if (!descriptor) { - struct buffer_head *bh; - J_ASSERT (bufs == 0); jbd_debug(4, "JBD2: get descriptor\n"); @@ -616,26 +623,26 @@ void jbd2_journal_commit_transaction(journal_t *journal) continue; } - bh = jh2bh(descriptor); jbd_debug(4, "JBD2: got buffer %llu (%p)\n", - (unsigned long long)bh->b_blocknr, bh->b_data); - header = (journal_header_t *)&bh->b_data[0]; + (unsigned long long)descriptor->b_blocknr, + descriptor->b_data); + header = (journal_header_t *)descriptor->b_data; header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER); header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK); header->h_sequence = cpu_to_be32(commit_transaction->t_tid); - tagp = &bh->b_data[sizeof(journal_header_t)]; - space_left = bh->b_size - sizeof(journal_header_t); + tagp = &descriptor->b_data[sizeof(journal_header_t)]; + space_left = descriptor->b_size - + sizeof(journal_header_t); first_tag = 1; - set_buffer_jwrite(bh); - set_buffer_dirty(bh); - wbuf[bufs++] = bh; + set_buffer_jwrite(descriptor); + set_buffer_dirty(descriptor); + wbuf[bufs++] = descriptor; /* Record it so that we can wait for IO completion later */ - BUFFER_TRACE(bh, "ph3: file as descriptor"); - jbd2_journal_file_buffer(descriptor, commit_transaction, - BJ_LogCtl); + BUFFER_TRACE(descriptor, "ph3: file as descriptor"); + jbd2_file_log_bh(&log_bufs, descriptor); } /* Where is the buffer to be written? */ @@ -658,29 +665,22 @@ void jbd2_journal_commit_transaction(journal_t *journal) /* Bump b_count to prevent truncate from stumbling over the shadowed buffer! @@@ This can go if we ever get - rid of the BJ_IO/BJ_Shadow pairing of buffers. */ + rid of the shadow pairing of buffers. */ atomic_inc(&jh2bh(jh)->b_count); - /* Make a temporary IO buffer with which to write it out - (this will requeue both the metadata buffer and the - temporary IO buffer). new_bh goes on BJ_IO*/ - - set_bit(BH_JWrite, &jh2bh(jh)->b_state); /* - * akpm: jbd2_journal_write_metadata_buffer() sets - * new_bh->b_transaction to commit_transaction. - * We need to clean this up before we release new_bh - * (which is of type BJ_IO) + * Make a temporary IO buffer with which to write it out + * (this will requeue the metadata buffer to BJ_Shadow). */ + set_bit(BH_JWrite, &jh2bh(jh)->b_state); JBUFFER_TRACE(jh, "ph3: write metadata"); flags = jbd2_journal_write_metadata_buffer(commit_transaction, - jh, &new_jh, blocknr); + jh, &wbuf[bufs], blocknr); if (flags < 0) { jbd2_journal_abort(journal, flags); continue; } - set_bit(BH_JWrite, &jh2bh(new_jh)->b_state); - wbuf[bufs++] = jh2bh(new_jh); + jbd2_file_log_bh(&io_bufs, wbuf[bufs]); /* Record the new block's tag in the current descriptor buffer */ @@ -694,10 +694,11 @@ void jbd2_journal_commit_transaction(journal_t *journal) tag = (journal_block_tag_t *) tagp; write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr); tag->t_flags = cpu_to_be16(tag_flag); - jbd2_block_tag_csum_set(journal, tag, jh2bh(new_jh), + jbd2_block_tag_csum_set(journal, tag, wbuf[bufs], commit_transaction->t_tid); tagp += tag_bytes; space_left -= tag_bytes; + bufs++; if (first_tag) { memcpy (tagp, journal->j_uuid, 16); @@ -809,7 +810,7 @@ start_journal_io: the log. Before we can commit it, wait for the IO so far to complete. Control buffers being written are on the transaction's t_log_list queue, and metadata buffers are on - the t_iobuf_list queue. + the io_bufs list. Wait for the buffers in reverse order. 
That way we are less likely to be woken up until all IOs have completed, and @@ -818,47 +819,33 @@ start_journal_io: jbd_debug(3, "JBD2: commit phase 3\n"); - /* - * akpm: these are BJ_IO, and j_list_lock is not needed. - * See __journal_try_to_free_buffer. - */ -wait_for_iobuf: - while (commit_transaction->t_iobuf_list != NULL) { - struct buffer_head *bh; + while (!list_empty(&io_bufs)) { + struct buffer_head *bh = list_entry(io_bufs.prev, + struct buffer_head, + b_assoc_buffers); - jh = commit_transaction->t_iobuf_list->b_tprev; - bh = jh2bh(jh); - if (buffer_locked(bh)) { - wait_on_buffer(bh); - goto wait_for_iobuf; - } - if (cond_resched()) - goto wait_for_iobuf; + wait_on_buffer(bh); + cond_resched(); if (unlikely(!buffer_uptodate(bh))) err = -EIO; - - clear_buffer_jwrite(bh); - - JBUFFER_TRACE(jh, "ph4: unfile after journal write"); - jbd2_journal_unfile_buffer(journal, jh); + jbd2_unfile_log_bh(bh); /* - * ->t_iobuf_list should contain only dummy buffer_heads - * which were created by jbd2_journal_write_metadata_buffer(). + * The list contains temporary buffer heads created by + * jbd2_journal_write_metadata_buffer(). */ BUFFER_TRACE(bh, "dumping temporary bh"); - jbd2_journal_put_journal_head(jh); __brelse(bh); J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0); free_buffer_head(bh); - /* We also have to unlock and free the corresponding - shadowed buffer */ + /* We also have to refile the corresponding shadowed buffer */ jh = commit_transaction->t_shadow_list->b_tprev; bh = jh2bh(jh); - clear_bit(BH_JWrite, &bh->b_state); + clear_buffer_jwrite(bh); J_ASSERT_BH(bh, buffer_jbddirty(bh)); + J_ASSERT_BH(bh, !buffer_shadow(bh)); /* The metadata is now released for reuse, but we need to remember it against this transaction so that when @@ -866,14 +853,6 @@ wait_for_iobuf: required. */ JBUFFER_TRACE(jh, "file as BJ_Forget"); jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget); - /* - * Wake up any transactions which were waiting for this IO to - * complete. The barrier must be here so that changes by - * jbd2_journal_file_buffer() take effect before wake_up_bit() - * does the waitqueue check. 
- */ - smp_mb(); - wake_up_bit(&bh->b_state, BH_Unshadow); JBUFFER_TRACE(jh, "brelse shadowed buffer"); __brelse(bh); } @@ -883,26 +862,19 @@ wait_for_iobuf: jbd_debug(3, "JBD2: commit phase 4\n"); /* Here we wait for the revoke record and descriptor record buffers */ - wait_for_ctlbuf: - while (commit_transaction->t_log_list != NULL) { + while (!list_empty(&log_bufs)) { struct buffer_head *bh; - jh = commit_transaction->t_log_list->b_tprev; - bh = jh2bh(jh); - if (buffer_locked(bh)) { - wait_on_buffer(bh); - goto wait_for_ctlbuf; - } - if (cond_resched()) - goto wait_for_ctlbuf; + bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers); + wait_on_buffer(bh); + cond_resched(); if (unlikely(!buffer_uptodate(bh))) err = -EIO; BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile"); clear_buffer_jwrite(bh); - jbd2_journal_unfile_buffer(journal, jh); - jbd2_journal_put_journal_head(jh); + jbd2_unfile_log_bh(bh); __brelse(bh); /* One for getblk */ /* AKPM: bforget here */ } @@ -952,9 +924,7 @@ wait_for_iobuf: J_ASSERT(list_empty(&commit_transaction->t_inode_list)); J_ASSERT(commit_transaction->t_buffers == NULL); J_ASSERT(commit_transaction->t_checkpoint_list == NULL); - J_ASSERT(commit_transaction->t_iobuf_list == NULL); J_ASSERT(commit_transaction->t_shadow_list == NULL); - J_ASSERT(commit_transaction->t_log_list == NULL); restart_loop: /* diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 95457576e434..02c7ad9d7a41 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -103,6 +103,24 @@ EXPORT_SYMBOL(jbd2_inode_cache); static void __journal_abort_soft (journal_t *journal, int errno); static int jbd2_journal_create_slab(size_t slab_size); +#ifdef CONFIG_JBD2_DEBUG +void __jbd2_debug(int level, const char *file, const char *func, + unsigned int line, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + if (level > jbd2_journal_enable_debug) + return; + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + printk(KERN_DEBUG "%s: (%s, %u): %pV\n", file, func, line, &vaf); + va_end(args); +} +EXPORT_SYMBOL(__jbd2_debug); +#endif + /* Checksumming functions */ int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb) { @@ -310,14 +328,12 @@ static void journal_kill_thread(journal_t *journal) * * If the source buffer has already been modified by a new transaction * since we took the last commit snapshot, we use the frozen copy of - * that data for IO. If we end up using the existing buffer_head's data - * for the write, then we *have* to lock the buffer to prevent anyone - * else from using and possibly modifying it while the IO is in - * progress. + * that data for IO. If we end up using the existing buffer_head's data + * for the write, then we have to make sure nobody modifies it while the + * IO is in progress. do_get_write_access() handles this. * - * The function returns a pointer to the buffer_heads to be used for IO. - * - * We assume that the journal has already been locked in this function. + * The function returns a pointer to the buffer_head to be used for IO. 
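The BH_Unshadow wake-up removed above is superseded by the BH_Shadow handshake: journal_end_buffer_io_sync(), earlier in this diff, clears the bit with clear_bit_unlock() and calls wake_up_bit(). A sketch of what the waiting side plausibly looks like with this era's action-callback form of wait_on_bit() (the example_ names are assumptions, not shown in these hunks):

static int example_sleep_on_shadow_bh(void *word)
{
	io_schedule();	/* the bit clears from the journal IO completion path */
	return 0;
}

static void example_wait_for_shadow(struct buffer_head *bh)
{
	/* pairs with clear_bit_unlock()/wake_up_bit() in the end_io handler */
	wait_on_bit(&bh->b_state, BH_Shadow,
		    example_sleep_on_shadow_bh, TASK_UNINTERRUPTIBLE);
}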
+ * * * Return value: * <0: Error @@ -330,15 +346,14 @@ static void journal_kill_thread(journal_t *journal) int jbd2_journal_write_metadata_buffer(transaction_t *transaction, struct journal_head *jh_in, - struct journal_head **jh_out, - unsigned long long blocknr) + struct buffer_head **bh_out, + sector_t blocknr) { int need_copy_out = 0; int done_copy_out = 0; int do_escape = 0; char *mapped_data; struct buffer_head *new_bh; - struct journal_head *new_jh; struct page *new_page; unsigned int new_offset; struct buffer_head *bh_in = jh2bh(jh_in); @@ -368,14 +383,13 @@ retry_alloc: /* keep subsequent assertions sane */ atomic_set(&new_bh->b_count, 1); - new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */ + jbd_lock_bh_state(bh_in); +repeat: /* * If a new transaction has already done a buffer copy-out, then * we use that version of the data for the commit. */ - jbd_lock_bh_state(bh_in); -repeat: if (jh_in->b_frozen_data) { done_copy_out = 1; new_page = virt_to_page(jh_in->b_frozen_data); @@ -415,7 +429,7 @@ repeat: jbd_unlock_bh_state(bh_in); tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS); if (!tmp) { - jbd2_journal_put_journal_head(new_jh); + brelse(new_bh); return -ENOMEM; } jbd_lock_bh_state(bh_in); @@ -426,7 +440,7 @@ repeat: jh_in->b_frozen_data = tmp; mapped_data = kmap_atomic(new_page); - memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size); + memcpy(tmp, mapped_data + new_offset, bh_in->b_size); kunmap_atomic(mapped_data); new_page = virt_to_page(tmp); @@ -452,14 +466,14 @@ repeat: } set_bh_page(new_bh, new_page, new_offset); - new_jh->b_transaction = NULL; - new_bh->b_size = jh2bh(jh_in)->b_size; - new_bh->b_bdev = transaction->t_journal->j_dev; + new_bh->b_size = bh_in->b_size; + new_bh->b_bdev = journal->j_dev; new_bh->b_blocknr = blocknr; + new_bh->b_private = bh_in; set_buffer_mapped(new_bh); set_buffer_dirty(new_bh); - *jh_out = new_jh; + *bh_out = new_bh; /* * The to-be-written buffer needs to get moved to the io queue, @@ -470,11 +484,9 @@ repeat: spin_lock(&journal->j_list_lock); __jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow); spin_unlock(&journal->j_list_lock); + set_buffer_shadow(bh_in); jbd_unlock_bh_state(bh_in); - JBUFFER_TRACE(new_jh, "file as BJ_IO"); - jbd2_journal_file_buffer(new_jh, transaction, BJ_IO); - return do_escape | (done_copy_out << 1); } @@ -484,35 +496,6 @@ repeat: */ /* - * __jbd2_log_space_left: Return the number of free blocks left in the journal. - * - * Called with the journal already locked. - * - * Called under j_state_lock - */ - -int __jbd2_log_space_left(journal_t *journal) -{ - int left = journal->j_free; - - /* assert_spin_locked(&journal->j_state_lock); */ - - /* - * Be pessimistic here about the number of those free blocks which - * might be required for log descriptor control blocks. - */ - -#define MIN_LOG_RESERVED_BLOCKS 32 /* Allow for rounding errors */ - - left -= MIN_LOG_RESERVED_BLOCKS; - - if (left <= 0) - return 0; - left -= (left >> 3); - return left; -} - -/* * Called with j_state_lock locked for writing. * Returns true if a transaction commit was started. */ @@ -564,20 +547,17 @@ int jbd2_log_start_commit(journal_t *journal, tid_t tid) } /* - * Force and wait upon a commit if the calling process is not within - * transaction. This is used for forcing out undo-protected data which contains - * bitmaps, when the fs is running out of space. - * - * We can only force the running transaction if we don't have an active handle; - * otherwise, we will deadlock. - * - * Returns true if a transaction was started. 
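The fs/jbd2/journal.c hunk above adds __jbd2_debug() under CONFIG_JBD2_DEBUG; the jbd_debug() call sites seen throughout this diff presumably reach it through a header macro along these lines (an assumption, the macro itself is not shown here):

#define jbd_debug(n, fmt, a...) \
	__jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)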
+ * Force and wait any uncommitted transactions. We can only force the running + * transaction if we don't have an active handle, otherwise, we will deadlock. + * Returns: <0 in case of error, + * 0 if nothing to commit, + * 1 if the transaction was successfully committed. */ -int jbd2_journal_force_commit_nested(journal_t *journal) +static int __jbd2_journal_force_commit(journal_t *journal) { transaction_t *transaction = NULL; tid_t tid; - int need_to_start = 0; + int need_to_start = 0, ret = 0; read_lock(&journal->j_state_lock); if (journal->j_running_transaction && !current->journal_info) { @@ -588,16 +568,53 @@ int jbd2_journal_force_commit_nested(journal_t *journal) transaction = journal->j_committing_transaction; if (!transaction) { + /* Nothing to commit */ read_unlock(&journal->j_state_lock); - return 0; /* Nothing to retry */ + return 0; } - tid = transaction->t_tid; read_unlock(&journal->j_state_lock); if (need_to_start) jbd2_log_start_commit(journal, tid); - jbd2_log_wait_commit(journal, tid); - return 1; + ret = jbd2_log_wait_commit(journal, tid); + if (!ret) + ret = 1; + + return ret; +} + +/** + * Force and wait upon a commit if the calling process is not within a + * transaction. This is used for forcing out undo-protected data which contains + * bitmaps, when the fs is running out of space. + * + * @journal: journal to force + * Returns true if progress was made. + */ +int jbd2_journal_force_commit_nested(journal_t *journal) +{ + int ret; + + ret = __jbd2_journal_force_commit(journal); + return ret > 0; +} + +/** + * int jbd2_journal_force_commit() - force any uncommitted transactions + * @journal: journal to force + * + * Caller wants an unconditional commit. We can only force the running transaction + * if we don't have an active handle, otherwise, we will deadlock. + */ +int jbd2_journal_force_commit(journal_t *journal) +{ + int ret; + + J_ASSERT(!current->journal_info); + ret = __jbd2_journal_force_commit(journal); + if (ret > 0) + ret = 0; + return ret; } /* @@ -798,7 +815,7 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr, * But we don't bother doing that, so there will be coherency problems with * mmaps of blockdevs which hold live JBD-controlled filesystems. */ -struct journal_head *jbd2_journal_get_descriptor_buffer(journal_t *journal) +struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal) { struct buffer_head *bh; unsigned long long blocknr; @@ -817,7 +834,7 @@ struct journal_head *jbd2_journal_get_descriptor_buffer(journal_t *journal) set_buffer_uptodate(bh); unlock_buffer(bh); BUFFER_TRACE(bh, "return this buffer"); - return jbd2_journal_add_journal_head(bh); + return bh; } /* @@ -1062,11 +1079,10 @@ static journal_t * journal_init_common (void) return NULL; init_waitqueue_head(&journal->j_wait_transaction_locked); - init_waitqueue_head(&journal->j_wait_logspace); init_waitqueue_head(&journal->j_wait_done_commit); - init_waitqueue_head(&journal->j_wait_checkpoint); init_waitqueue_head(&journal->j_wait_commit); init_waitqueue_head(&journal->j_wait_updates); + init_waitqueue_head(&journal->j_wait_reserved); mutex_init(&journal->j_barrier); mutex_init(&journal->j_checkpoint_mutex); spin_lock_init(&journal->j_revoke_lock); @@ -1076,6 +1092,7 @@ static journal_t * journal_init_common (void) journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE); journal->j_min_batch_time = 0; journal->j_max_batch_time = 15000; /* 15ms */ + atomic_set(&journal->j_reserved_credits, 0); /* The journal is marked for error until we succeed with recovery!
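The two force-commit wrappers above split one primitive by caller contract: the _nested variant reports mere progress for callers that may already sit inside a filesystem operation, while jbd2_journal_force_commit() demands a clean commit and asserts that no handle is held. A hypothetical caller sketch (example_ name is not from the patch):

/* caller holding no handle: demand a clean commit or an error */
static int example_flush_journal(journal_t *journal)
{
	int err = jbd2_journal_force_commit(journal);	/* 0 or -errno */

	if (err)
		printk(KERN_WARNING "example: journal commit failed: %d\n", err);
	return err;
}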
*/ journal->j_flags = JBD2_ABORT; @@ -1318,6 +1335,7 @@ static int journal_reset(journal_t *journal) static void jbd2_write_superblock(journal_t *journal, int write_op) { struct buffer_head *bh = journal->j_sb_buffer; + journal_superblock_t *sb = journal->j_superblock; int ret; trace_jbd2_write_superblock(journal, write_op); @@ -1339,6 +1357,7 @@ static void jbd2_write_superblock(journal_t *journal, int write_op) clear_buffer_write_io_error(bh); set_buffer_uptodate(bh); } + jbd2_superblock_csum_set(journal, sb); get_bh(bh); bh->b_end_io = end_buffer_write_sync; ret = submit_bh(write_op, bh); @@ -1435,7 +1454,6 @@ void jbd2_journal_update_sb_errno(journal_t *journal) jbd_debug(1, "JBD2: updating superblock error (errno %d)\n", journal->j_errno); sb->s_errno = cpu_to_be32(journal->j_errno); - jbd2_superblock_csum_set(journal, sb); read_unlock(&journal->j_state_lock); jbd2_write_superblock(journal, WRITE_SYNC); @@ -2325,13 +2343,13 @@ static struct journal_head *journal_alloc_journal_head(void) #ifdef CONFIG_JBD2_DEBUG atomic_inc(&nr_journal_heads); #endif - ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS); + ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS); if (!ret) { jbd_debug(1, "out of memory for journal_head\n"); pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__); while (!ret) { yield(); - ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS); + ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS); } } return ret; @@ -2393,10 +2411,8 @@ struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh) struct journal_head *new_jh = NULL; repeat: - if (!buffer_jbd(bh)) { + if (!buffer_jbd(bh)) new_jh = journal_alloc_journal_head(); - memset(new_jh, 0, sizeof(*new_jh)); - } jbd_lock_bh_journal_head(bh); if (buffer_jbd(bh)) { diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 626846bac32f..d4851464b57e 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -399,18 +399,17 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf) static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag, void *buf, __u32 sequence) { - __u32 provided, calculated; + __u32 csum32; if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) return 1; sequence = cpu_to_be32(sequence); - calculated = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence, - sizeof(sequence)); - calculated = jbd2_chksum(j, calculated, buf, j->j_blocksize); - provided = be32_to_cpu(tag->t_checksum); + csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence, + sizeof(sequence)); + csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize); - return provided == cpu_to_be32(calculated); + return tag->t_checksum == cpu_to_be16(csum32); } static int do_one_pass(journal_t *journal, diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c index f30b80b4ce8b..198c9c10276d 100644 --- a/fs/jbd2/revoke.c +++ b/fs/jbd2/revoke.c @@ -122,9 +122,10 @@ struct jbd2_revoke_table_s #ifdef __KERNEL__ static void write_one_revoke_record(journal_t *, transaction_t *, - struct journal_head **, int *, + struct list_head *, + struct buffer_head **, int *, struct jbd2_revoke_record_s *, int); -static void flush_descriptor(journal_t *, struct journal_head *, int, int); +static void flush_descriptor(journal_t *, struct buffer_head *, int, int); #endif /* Utility functions to maintain the revoke table */ @@ -531,9 +532,10 @@ void jbd2_journal_switch_revoke_table(journal_t *journal) */ void jbd2_journal_write_revoke_records(journal_t *journal, transaction_t *transaction, + struct 
list_head *log_bufs, int write_op) { - struct journal_head *descriptor; + struct buffer_head *descriptor; struct jbd2_revoke_record_s *record; struct jbd2_revoke_table_s *revoke; struct list_head *hash_list; @@ -553,7 +555,7 @@ void jbd2_journal_write_revoke_records(journal_t *journal, while (!list_empty(hash_list)) { record = (struct jbd2_revoke_record_s *) hash_list->next; - write_one_revoke_record(journal, transaction, + write_one_revoke_record(journal, transaction, log_bufs, &descriptor, &offset, record, write_op); count++; @@ -573,13 +575,14 @@ void jbd2_journal_write_revoke_records(journal_t *journal, static void write_one_revoke_record(journal_t *journal, transaction_t *transaction, - struct journal_head **descriptorp, + struct list_head *log_bufs, + struct buffer_head **descriptorp, int *offsetp, struct jbd2_revoke_record_s *record, int write_op) { int csum_size = 0; - struct journal_head *descriptor; + struct buffer_head *descriptor; int offset; journal_header_t *header; @@ -609,26 +612,26 @@ static void write_one_revoke_record(journal_t *journal, descriptor = jbd2_journal_get_descriptor_buffer(journal); if (!descriptor) return; - header = (journal_header_t *) &jh2bh(descriptor)->b_data[0]; + header = (journal_header_t *)descriptor->b_data; header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER); header->h_blocktype = cpu_to_be32(JBD2_REVOKE_BLOCK); header->h_sequence = cpu_to_be32(transaction->t_tid); /* Record it so that we can wait for IO completion later */ - JBUFFER_TRACE(descriptor, "file as BJ_LogCtl"); - jbd2_journal_file_buffer(descriptor, transaction, BJ_LogCtl); + BUFFER_TRACE(descriptor, "file in log_bufs"); + jbd2_file_log_bh(log_bufs, descriptor); offset = sizeof(jbd2_journal_revoke_header_t); *descriptorp = descriptor; } if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) { - * ((__be64 *)(&jh2bh(descriptor)->b_data[offset])) = + * ((__be64 *)(&descriptor->b_data[offset])) = cpu_to_be64(record->blocknr); offset += 8; } else { - * ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) = + * ((__be32 *)(&descriptor->b_data[offset])) = cpu_to_be32(record->blocknr); offset += 4; } @@ -636,8 +639,7 @@ static void write_one_revoke_record(journal_t *journal, *offsetp = offset; } -static void jbd2_revoke_csum_set(journal_t *j, - struct journal_head *descriptor) +static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh) { struct jbd2_journal_revoke_tail *tail; __u32 csum; @@ -645,12 +647,10 @@ static void jbd2_revoke_csum_set(journal_t *j, if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) return; - tail = (struct jbd2_journal_revoke_tail *) - (jh2bh(descriptor)->b_data + j->j_blocksize - + tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize - sizeof(struct jbd2_journal_revoke_tail)); tail->r_checksum = 0; - csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data, - j->j_blocksize); + csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize); tail->r_checksum = cpu_to_be32(csum); } @@ -662,25 +662,24 @@ static void jbd2_revoke_csum_set(journal_t *j, */ static void flush_descriptor(journal_t *journal, - struct journal_head *descriptor, + struct buffer_head *descriptor, int offset, int write_op) { jbd2_journal_revoke_header_t *header; - struct buffer_head *bh = jh2bh(descriptor); if (is_journal_aborted(journal)) { - put_bh(bh); + put_bh(descriptor); return; } - header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data; + header = (jbd2_journal_revoke_header_t *)descriptor->b_data; header->r_count = 
cpu_to_be32(offset); jbd2_revoke_csum_set(journal, descriptor); - set_buffer_jwrite(bh); - BUFFER_TRACE(bh, "write"); - set_buffer_dirty(bh); - write_dirty_buffer(bh, write_op); + set_buffer_jwrite(descriptor); + BUFFER_TRACE(descriptor, "write"); + set_buffer_dirty(descriptor); + write_dirty_buffer(descriptor, write_op); } #endif diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 10f524c59ea8..7aa9a32573bb 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -89,7 +89,8 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction) transaction->t_expires = jiffies + journal->j_commit_interval; spin_lock_init(&transaction->t_handle_lock); atomic_set(&transaction->t_updates, 0); - atomic_set(&transaction->t_outstanding_credits, 0); + atomic_set(&transaction->t_outstanding_credits, + atomic_read(&journal->j_reserved_credits)); atomic_set(&transaction->t_handle_count, 0); INIT_LIST_HEAD(&transaction->t_inode_list); INIT_LIST_HEAD(&transaction->t_private_list); @@ -141,6 +142,112 @@ static inline void update_t_max_wait(transaction_t *transaction, } /* + * Wait until running transaction passes T_LOCKED state. Also starts the commit + * if needed. The function expects running transaction to exist and releases + * j_state_lock. + */ +static void wait_transaction_locked(journal_t *journal) + __releases(journal->j_state_lock) +{ + DEFINE_WAIT(wait); + int need_to_start; + tid_t tid = journal->j_running_transaction->t_tid; + + prepare_to_wait(&journal->j_wait_transaction_locked, &wait, + TASK_UNINTERRUPTIBLE); + need_to_start = !tid_geq(journal->j_commit_request, tid); + read_unlock(&journal->j_state_lock); + if (need_to_start) + jbd2_log_start_commit(journal, tid); + schedule(); + finish_wait(&journal->j_wait_transaction_locked, &wait); +} + +static void sub_reserved_credits(journal_t *journal, int blocks) +{ + atomic_sub(blocks, &journal->j_reserved_credits); + wake_up(&journal->j_wait_reserved); +} + +/* + * Wait until we can add credits for handle to the running transaction. Called + * with j_state_lock held for reading. Returns 0 if handle joined the running + * transaction. Returns 1 if we had to wait, j_state_lock is dropped, and + * caller must retry. + */ +static int add_transaction_credits(journal_t *journal, int blocks, + int rsv_blocks) +{ + transaction_t *t = journal->j_running_transaction; + int needed; + int total = blocks + rsv_blocks; + + /* + * If the current transaction is locked down for commit, wait + * for the lock to be released. + */ + if (t->t_state == T_LOCKED) { + wait_transaction_locked(journal); + return 1; + } + + /* + * If there is not enough space left in the log to write all + * potential buffers requested by this operation, we need to + * stall pending a log checkpoint to free some more log space. + */ + needed = atomic_add_return(total, &t->t_outstanding_credits); + if (needed > journal->j_max_transaction_buffers) { + /* + * If the current transaction is already too large, + * then start to commit it: we can then go back and + * attach this handle to a new transaction. + */ + atomic_sub(total, &t->t_outstanding_credits); + wait_transaction_locked(journal); + return 1; + } + + /* + * The commit code assumes that it can get enough log space + * without forcing a checkpoint. This is *critical* for + * correctness: a checkpoint of a buffer which is also + * associated with a committing transaction creates a deadlock, + * so commit simply cannot force through checkpoints. 
+ * + * We must therefore ensure the necessary space in the journal + * *before* starting to dirty potentially checkpointed buffers + * in the new transaction. + */ + if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) { + atomic_sub(total, &t->t_outstanding_credits); + read_unlock(&journal->j_state_lock); + write_lock(&journal->j_state_lock); + if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) + __jbd2_log_wait_for_space(journal); + write_unlock(&journal->j_state_lock); + return 1; + } + + /* No reservation? We are done... */ + if (!rsv_blocks) + return 0; + + needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits); + /* We allow at most half of a transaction to be reserved */ + if (needed > journal->j_max_transaction_buffers / 2) { + sub_reserved_credits(journal, rsv_blocks); + atomic_sub(total, &t->t_outstanding_credits); + read_unlock(&journal->j_state_lock); + wait_event(journal->j_wait_reserved, + atomic_read(&journal->j_reserved_credits) + rsv_blocks + <= journal->j_max_transaction_buffers / 2); + return 1; + } + return 0; +} + +/* * start_this_handle: Given a handle, deal with any locking or stalling * needed to make sure that there is enough journal space for the handle * to begin. Attach the handle to a transaction and set up the @@ -151,18 +258,24 @@ static int start_this_handle(journal_t *journal, handle_t *handle, gfp_t gfp_mask) { transaction_t *transaction, *new_transaction = NULL; - tid_t tid; - int needed, need_to_start; - int nblocks = handle->h_buffer_credits; + int blocks = handle->h_buffer_credits; + int rsv_blocks = 0; unsigned long ts = jiffies; - if (nblocks > journal->j_max_transaction_buffers) { + /* + * 1/2 of transaction can be reserved so we can practically handle + * only 1/2 of maximum transaction size per operation + */ + if (WARN_ON(blocks > journal->j_max_transaction_buffers / 2)) { printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n", - current->comm, nblocks, - journal->j_max_transaction_buffers); + current->comm, blocks, + journal->j_max_transaction_buffers / 2); return -ENOSPC; } + if (handle->h_rsv_handle) + rsv_blocks = handle->h_rsv_handle->h_buffer_credits; + alloc_transaction: if (!journal->j_running_transaction) { new_transaction = kmem_cache_zalloc(transaction_cache, @@ -199,8 +312,12 @@ repeat: return -EROFS; } - /* Wait on the journal's transaction barrier if necessary */ - if (journal->j_barrier_count) { + /* + * Wait on the journal's transaction barrier if necessary. Specifically + * we allow reserved handles to proceed because otherwise commit could + * deadlock on page writeback not being able to complete. + */ + if (!handle->h_reserved && journal->j_barrier_count) { read_unlock(&journal->j_state_lock); wait_event(journal->j_wait_transaction_locked, journal->j_barrier_count == 0); @@ -213,7 +330,7 @@ repeat: goto alloc_transaction; write_lock(&journal->j_state_lock); if (!journal->j_running_transaction && - !journal->j_barrier_count) { + (handle->h_reserved || !journal->j_barrier_count)) { jbd2_get_transaction(journal, new_transaction); new_transaction = NULL; } @@ -223,85 +340,18 @@ repeat: transaction = journal->j_running_transaction; - /* - * If the current transaction is locked down for commit, wait for the - * lock to be released. 
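To make the cap in add_transaction_credits() above concrete: reserved credits may never exceed half of j_max_transaction_buffers, so with a 1024-block transaction limit at most 512 blocks can sit in j_reserved_credits at once. A standalone sketch of the check (hypothetical helper, not from the patch):

/* With j_max_transaction_buffers == 1024, at most 512 blocks may be
 * reserved; a request pushing past that is backed out and the caller
 * sleeps on j_wait_reserved until other reservations are released. */
static int example_reservation_fits(int reserved_now, int rsv_blocks,
				    int max_transaction_buffers)
{
	return reserved_now + rsv_blocks <= max_transaction_buffers / 2;
}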
- */ - if (transaction->t_state == T_LOCKED) { - DEFINE_WAIT(wait); - - prepare_to_wait(&journal->j_wait_transaction_locked, - &wait, TASK_UNINTERRUPTIBLE); - read_unlock(&journal->j_state_lock); - schedule(); - finish_wait(&journal->j_wait_transaction_locked, &wait); - goto repeat; - } - - /* - * If there is not enough space left in the log to write all potential - * buffers requested by this operation, we need to stall pending a log - * checkpoint to free some more log space. - */ - needed = atomic_add_return(nblocks, - &transaction->t_outstanding_credits); - - if (needed > journal->j_max_transaction_buffers) { + if (!handle->h_reserved) { + /* We may have dropped j_state_lock - restart in that case */ + if (add_transaction_credits(journal, blocks, rsv_blocks)) + goto repeat; + } else { /* - * If the current transaction is already too large, then start - * to commit it: we can then go back and attach this handle to - * a new transaction. + * We have handle reserved so we are allowed to join T_LOCKED + * transaction and we don't have to check for transaction size + * and journal space. */ - DEFINE_WAIT(wait); - - jbd_debug(2, "Handle %p starting new commit...\n", handle); - atomic_sub(nblocks, &transaction->t_outstanding_credits); - prepare_to_wait(&journal->j_wait_transaction_locked, &wait, - TASK_UNINTERRUPTIBLE); - tid = transaction->t_tid; - need_to_start = !tid_geq(journal->j_commit_request, tid); - read_unlock(&journal->j_state_lock); - if (need_to_start) - jbd2_log_start_commit(journal, tid); - schedule(); - finish_wait(&journal->j_wait_transaction_locked, &wait); - goto repeat; - } - - /* - * The commit code assumes that it can get enough log space - * without forcing a checkpoint. This is *critical* for - * correctness: a checkpoint of a buffer which is also - * associated with a committing transaction creates a deadlock, - * so commit simply cannot force through checkpoints. - * - * We must therefore ensure the necessary space in the journal - * *before* starting to dirty potentially checkpointed buffers - * in the new transaction. - * - * The worst part is, any transaction currently committing can - * reduce the free space arbitrarily. Be careful to account for - * those buffers when checkpointing. - */ - - /* - * @@@ AKPM: This seems rather over-defensive. We're giving commit - * a _lot_ of headroom: 1/4 of the journal plus the size of - * the committing transaction. Really, we only need to give it - * committing_transaction->t_outstanding_credits plus "enough" for - * the log control blocks. - * Also, this test is inconsistent with the matching one in - * jbd2_journal_extend(). 
- */ - if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) { - jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle); - atomic_sub(nblocks, &transaction->t_outstanding_credits); - read_unlock(&journal->j_state_lock); - write_lock(&journal->j_state_lock); - if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) - __jbd2_log_wait_for_space(journal); - write_unlock(&journal->j_state_lock); - goto repeat; + sub_reserved_credits(journal, blocks); + handle->h_reserved = 0; } /* OK, account for the buffers that this operation expects to @@ -309,15 +359,16 @@ repeat: */ update_t_max_wait(transaction, ts); handle->h_transaction = transaction; - handle->h_requested_credits = nblocks; + handle->h_requested_credits = blocks; handle->h_start_jiffies = jiffies; atomic_inc(&transaction->t_updates); atomic_inc(&transaction->t_handle_count); - jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n", - handle, nblocks, + jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n", + handle, blocks, atomic_read(&transaction->t_outstanding_credits), - __jbd2_log_space_left(journal)); + jbd2_log_space_left(journal)); read_unlock(&journal->j_state_lock); + current->journal_info = handle; lock_map_acquire(&handle->h_lockdep_map); jbd2_journal_free_transaction(new_transaction); @@ -348,16 +399,21 @@ static handle_t *new_handle(int nblocks) * * We make sure that the transaction can guarantee at least nblocks of * modified buffers in the log. We block until the log can guarantee - * that much space. - * - * This function is visible to journal users (like ext3fs), so is not - * called with the journal already locked. + * that much space. Additionally, if rsv_blocks > 0, we also create another + * handle with rsv_blocks reserved blocks in the journal. This handle is + * stored in h_rsv_handle. It is not attached to any particular transaction + * and thus doesn't block transaction commit. If the caller uses this reserved + * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop() + * on the parent handle will dispose of the reserved one. A reserved handle has + * to be converted to a normal handle using jbd2_journal_start_reserved() + * before it can be used. * * Return a pointer to a newly allocated handle, or an ERR_PTR() value * on failure.
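To make the reserved-handle contract described above concrete, here is a hedged caller sketch; the function name and credit counts are invented, and error handling is reduced to the essentials:

#include <linux/jbd2.h>

/* Illustrative only: 8 normal credits plus a 4-credit reservation. */
static int example_use_reserved(journal_t *journal)
{
        handle_t *handle, *rsv;
        int err;

        handle = jbd2__journal_start(journal, 8, 4, GFP_NOFS, 0, 0);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        /* Detach the reservation, or stopping the parent frees it. */
        rsv = handle->h_rsv_handle;
        handle->h_rsv_handle = NULL;
        err = jbd2_journal_stop(handle);
        if (err) {
                jbd2_journal_free_reserved(rsv);
                return err;
        }

        /* Later (e.g. from writeback): convert the reservation into a
         * normal handle; it may join even a T_LOCKED transaction and
         * is freed automatically if the conversion fails. */
        err = jbd2_journal_start_reserved(rsv, 0, 0);
        if (err)
                return err;
        return jbd2_journal_stop(rsv);
}

The detach step is the crux of the API: jbd2_journal_stop() on the parent handle would otherwise dispose of the reservation.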
*/ -handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask, - unsigned int type, unsigned int line_no) +handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks, + gfp_t gfp_mask, unsigned int type, + unsigned int line_no) { handle_t *handle = journal_current_handle(); int err; @@ -374,13 +430,24 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask, handle = new_handle(nblocks); if (!handle) return ERR_PTR(-ENOMEM); + if (rsv_blocks) { + handle_t *rsv_handle; - current->journal_info = handle; + rsv_handle = new_handle(rsv_blocks); + if (!rsv_handle) { + jbd2_free_handle(handle); + return ERR_PTR(-ENOMEM); + } + rsv_handle->h_reserved = 1; + rsv_handle->h_journal = journal; + handle->h_rsv_handle = rsv_handle; + } err = start_this_handle(journal, handle, gfp_mask); if (err < 0) { + if (handle->h_rsv_handle) + jbd2_free_handle(handle->h_rsv_handle); jbd2_free_handle(handle); - current->journal_info = NULL; return ERR_PTR(err); } handle->h_type = type; @@ -395,10 +462,65 @@ EXPORT_SYMBOL(jbd2__journal_start); handle_t *jbd2_journal_start(journal_t *journal, int nblocks) { - return jbd2__journal_start(journal, nblocks, GFP_NOFS, 0, 0); + return jbd2__journal_start(journal, nblocks, 0, GFP_NOFS, 0, 0); } EXPORT_SYMBOL(jbd2_journal_start); +void jbd2_journal_free_reserved(handle_t *handle) +{ + journal_t *journal = handle->h_journal; + + WARN_ON(!handle->h_reserved); + sub_reserved_credits(journal, handle->h_buffer_credits); + jbd2_free_handle(handle); +} +EXPORT_SYMBOL(jbd2_journal_free_reserved); + +/** + * int jbd2_journal_start_reserved(handle_t *handle) - start reserved handle + * @handle: handle to start + * + * Start handle that has been previously reserved via the rsv_blocks argument + * of jbd2__journal_start(). This attaches @handle to the running transaction + * (or creates one if there's no transaction running). Unlike + * jbd2_journal_start() this function cannot block on journal commit, + * checkpointing, or similar stuff. It can block on memory allocation or a + * frozen journal though. + * + * Return 0 on success, non-zero on error - handle is freed in that case. + */ +int jbd2_journal_start_reserved(handle_t *handle, unsigned int type, + unsigned int line_no) +{ + journal_t *journal = handle->h_journal; + int ret = -EIO; + + if (WARN_ON(!handle->h_reserved)) { + /* Someone passed in a normal handle? Just stop it. */ + jbd2_journal_stop(handle); + return ret; + } + /* + * Usefulness of mixing of reserved and unreserved handles is + * questionable. So far nobody seems to need it so just error out. + */ + if (WARN_ON(current->journal_info)) { + jbd2_journal_free_reserved(handle); + return ret; + } + + handle->h_journal = NULL; + /* + * GFP_NOFS is here because callers are likely from writeback or + * similarly constrained call sites + */ + ret = start_this_handle(journal, handle, GFP_NOFS); + if (ret < 0) { + jbd2_journal_free_reserved(handle); + return ret; + } + handle->h_type = type; + handle->h_line_no = line_no; + return ret; +} +EXPORT_SYMBOL(jbd2_journal_start_reserved); /** * int jbd2_journal_extend() - extend buffer credits.
@@ -423,49 +545,53 @@ EXPORT_SYMBOL(jbd2_journal_start); int jbd2_journal_extend(handle_t *handle, int nblocks) { transaction_t *transaction = handle->h_transaction; - journal_t *journal = transaction->t_journal; + journal_t *journal; int result; int wanted; - result = -EIO; + WARN_ON(!transaction); if (is_handle_aborted(handle)) - goto out; + return -EROFS; + journal = transaction->t_journal; result = 1; read_lock(&journal->j_state_lock); /* Don't extend a locked-down transaction! */ - if (handle->h_transaction->t_state != T_RUNNING) { + if (transaction->t_state != T_RUNNING) { jbd_debug(3, "denied handle %p %d blocks: " "transaction not running\n", handle, nblocks); goto error_out; } spin_lock(&transaction->t_handle_lock); - wanted = atomic_read(&transaction->t_outstanding_credits) + nblocks; + wanted = atomic_add_return(nblocks, + &transaction->t_outstanding_credits); if (wanted > journal->j_max_transaction_buffers) { jbd_debug(3, "denied handle %p %d blocks: " "transaction too large\n", handle, nblocks); + atomic_sub(nblocks, &transaction->t_outstanding_credits); goto unlock; } - if (wanted > __jbd2_log_space_left(journal)) { + if (wanted + (wanted >> JBD2_CONTROL_BLOCKS_SHIFT) > + jbd2_log_space_left(journal)) { jbd_debug(3, "denied handle %p %d blocks: " "insufficient log space\n", handle, nblocks); + atomic_sub(nblocks, &transaction->t_outstanding_credits); goto unlock; } trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev, - handle->h_transaction->t_tid, + transaction->t_tid, handle->h_type, handle->h_line_no, handle->h_buffer_credits, nblocks); handle->h_buffer_credits += nblocks; handle->h_requested_credits += nblocks; - atomic_add(nblocks, &transaction->t_outstanding_credits); result = 0; jbd_debug(3, "extended handle %p by %d\n", handle, nblocks); @@ -473,7 +599,6 @@ unlock: spin_unlock(&transaction->t_handle_lock); error_out: read_unlock(&journal->j_state_lock); -out: return result; } @@ -490,19 +615,22 @@ out: * to a running handle, a call to jbd2_journal_restart will commit the * handle's transaction so far and reattach the handle to a new * transaction capable of guaranteeing the requested number of - credits. + credits. We preserve the reserved handle if one is attached to the + passed-in handle. */ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask) { transaction_t *transaction = handle->h_transaction; - journal_t *journal = transaction->t_journal; + journal_t *journal; tid_t tid; int need_to_start, ret; + WARN_ON(!transaction); /* If we've had an abort of any type, don't even think about * actually doing the restart! 
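Callers usually pair the extend path above with a restart fallback. A sketch of that common idiom (ensure_credits is a hypothetical helper; ext4's code follows a similar shape):

#include <linux/jbd2.h>

/* Make sure 'handle' has at least 'needed' credits, extending the
 * current transaction if possible and restarting if not. */
static int ensure_credits(handle_t *handle, int needed)
{
        int err;

        if (handle->h_buffer_credits >= needed)
                return 0;
        err = jbd2_journal_extend(handle, needed - handle->h_buffer_credits);
        if (err < 0)
                return err;     /* hard error (e.g. aborted journal) */
        if (err > 0)            /* could not extend: commit, re-attach */
                err = jbd2__journal_restart(handle, needed, GFP_NOFS);
        return err;
}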
*/ if (is_handle_aborted(handle)) return 0; + journal = transaction->t_journal; /* * First unlink the handle from its current transaction, and start the @@ -515,12 +643,18 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask) spin_lock(&transaction->t_handle_lock); atomic_sub(handle->h_buffer_credits, &transaction->t_outstanding_credits); + if (handle->h_rsv_handle) { + sub_reserved_credits(journal, + handle->h_rsv_handle->h_buffer_credits); + } if (atomic_dec_and_test(&transaction->t_updates)) wake_up(&journal->j_wait_updates); + tid = transaction->t_tid; spin_unlock(&transaction->t_handle_lock); + handle->h_transaction = NULL; + current->journal_info = NULL; jbd_debug(2, "restarting handle %p\n", handle); - tid = transaction->t_tid; need_to_start = !tid_geq(journal->j_commit_request, tid); read_unlock(&journal->j_state_lock); if (need_to_start) @@ -557,6 +691,14 @@ void jbd2_journal_lock_updates(journal_t *journal) write_lock(&journal->j_state_lock); ++journal->j_barrier_count; + /* Wait until there are no reserved handles */ + if (atomic_read(&journal->j_reserved_credits)) { + write_unlock(&journal->j_state_lock); + wait_event(journal->j_wait_reserved, + atomic_read(&journal->j_reserved_credits) == 0); + write_lock(&journal->j_state_lock); + } + /* Wait until there are no running updates */ while (1) { transaction_t *transaction = journal->j_running_transaction; @@ -619,6 +761,12 @@ static void warn_dirty_buffer(struct buffer_head *bh) bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr); } +static int sleep_on_shadow_bh(void *word) +{ + io_schedule(); + return 0; +} + /* * If the buffer is already part of the current transaction, then there * is nothing we need to do. If it is already part of a prior @@ -634,17 +782,16 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, int force_copy) { struct buffer_head *bh; - transaction_t *transaction; + transaction_t *transaction = handle->h_transaction; journal_t *journal; int error; char *frozen_buffer = NULL; int need_copy = 0; unsigned long start_lock, time_lock; + WARN_ON(!transaction); if (is_handle_aborted(handle)) return -EROFS; - - transaction = handle->h_transaction; journal = transaction->t_journal; jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy); @@ -754,41 +901,29 @@ repeat: * journaled. If the primary copy is already going to * disk then we cannot do copy-out here. */ - if (jh->b_jlist == BJ_Shadow) { - DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow); - wait_queue_head_t *wqh; - - wqh = bit_waitqueue(&bh->b_state, BH_Unshadow); - + if (buffer_shadow(bh)) { JBUFFER_TRACE(jh, "on shadow: sleep"); jbd_unlock_bh_state(bh); - /* commit wakes up all shadow buffers after IO */ - for ( ; ; ) { - prepare_to_wait(wqh, &wait.wait, - TASK_UNINTERRUPTIBLE); - if (jh->b_jlist != BJ_Shadow) - break; - schedule(); - } - finish_wait(wqh, &wait.wait); + wait_on_bit(&bh->b_state, BH_Shadow, + sleep_on_shadow_bh, TASK_UNINTERRUPTIBLE); goto repeat; } - /* Only do the copy if the currently-owning transaction - * still needs it. If it is on the Forget list, the - * committing transaction is past that stage. The - * buffer had better remain locked during the kmalloc, - * but that should be true --- we hold the journal lock - * still and the buffer is already on the BUF_JOURNAL - * list so won't be flushed. + /* + * Only do the copy if the currently-owning transaction still + * needs it. 
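The shadow-buffer wait above is the standard wait_on_bit() pattern of this kernel generation: the action callback chooses how to sleep, and the commit path wakes the bit waitqueue once shadow IO completes. A self-contained sketch with invented names:

#include <linux/buffer_head.h>
#include <linux/jbd2.h>
#include <linux/sched.h>

static int example_shadow_sleep(void *word)
{
        io_schedule();
        return 0;       /* 0 = not interrupted, keep waiting */
}

/* Block until the commit code clears BH_Shadow on this buffer. */
static void example_wait_shadow(struct buffer_head *bh)
{
        wait_on_bit(&bh->b_state, BH_Shadow, example_shadow_sleep,
                    TASK_UNINTERRUPTIBLE);
}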
If buffer isn't on BJ_Metadata list, the + * committing transaction is past that stage (here we use the + * fact that BH_Shadow is set under bh_state lock together with + * refiling to BJ_Shadow list and at this point we know the + * buffer doesn't have BH_Shadow set). * * Subtle point, though: if this is a get_undo_access, * then we will be relying on the frozen_data to contain * the new value of the committed_data record after the * transaction, so we HAVE to force the frozen_data copy - * in that case. */ - - if (jh->b_jlist != BJ_Forget || force_copy) { + * in that case. + */ + if (jh->b_jlist == BJ_Metadata || force_copy) { JBUFFER_TRACE(jh, "generate frozen data"); if (!frozen_buffer) { JBUFFER_TRACE(jh, "allocate memory for buffer"); @@ -915,14 +1050,16 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh) int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; - journal_t *journal = transaction->t_journal; + journal_t *journal; struct journal_head *jh = jbd2_journal_add_journal_head(bh); int err; jbd_debug(5, "journal_head %p\n", jh); + WARN_ON(!transaction); err = -EROFS; if (is_handle_aborted(handle)) goto out; + journal = transaction->t_journal; err = 0; JBUFFER_TRACE(jh, "entry"); @@ -1128,12 +1265,14 @@ void jbd2_buffer_abort_trigger(struct journal_head *jh, int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; - journal_t *journal = transaction->t_journal; + journal_t *journal; struct journal_head *jh; int ret = 0; + WARN_ON(!transaction); if (is_handle_aborted(handle)) - goto out; + return -EROFS; + journal = transaction->t_journal; jh = jbd2_journal_grab_journal_head(bh); if (!jh) { ret = -EUCLEAN; @@ -1227,7 +1366,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) JBUFFER_TRACE(jh, "file as BJ_Metadata"); spin_lock(&journal->j_list_lock); - __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata); + __jbd2_journal_file_buffer(jh, transaction, BJ_Metadata); spin_unlock(&journal->j_list_lock); out_unlock_bh: jbd_unlock_bh_state(bh); @@ -1258,12 +1397,17 @@ out: int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; - journal_t *journal = transaction->t_journal; + journal_t *journal; struct journal_head *jh; int drop_reserve = 0; int err = 0; int was_modified = 0; + WARN_ON(!transaction); + if (is_handle_aborted(handle)) + return -EROFS; + journal = transaction->t_journal; + BUFFER_TRACE(bh, "entry"); jbd_lock_bh_state(bh); @@ -1290,7 +1434,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh) */ jh->b_modified = 0; - if (jh->b_transaction == handle->h_transaction) { + if (jh->b_transaction == transaction) { J_ASSERT_JH(jh, !jh->b_frozen_data); /* If we are forgetting a buffer which is already part @@ -1385,19 +1529,21 @@ drop: int jbd2_journal_stop(handle_t *handle) { transaction_t *transaction = handle->h_transaction; - journal_t *journal = transaction->t_journal; - int err, wait_for_commit = 0; + journal_t *journal; + int err = 0, wait_for_commit = 0; tid_t tid; pid_t pid; + if (!transaction) + goto free_and_exit; + journal = transaction->t_journal; + J_ASSERT(journal_current_handle() == handle); if (is_handle_aborted(handle)) err = -EIO; - else { + else J_ASSERT(atomic_read(&transaction->t_updates) > 0); - err = 0; - } if (--handle->h_ref > 0) { jbd_debug(4, "h_ref %d -> %d\n", 
handle->h_ref + 1, @@ -1407,7 +1553,7 @@ int jbd2_journal_stop(handle_t *handle) jbd_debug(4, "Handle %p going down\n", handle); trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev, - handle->h_transaction->t_tid, + transaction->t_tid, handle->h_type, handle->h_line_no, jiffies - handle->h_start_jiffies, handle->h_sync, handle->h_requested_credits, @@ -1518,33 +1664,13 @@ int jbd2_journal_stop(handle_t *handle) lock_map_release(&handle->h_lockdep_map); + if (handle->h_rsv_handle) + jbd2_journal_free_reserved(handle->h_rsv_handle); +free_and_exit: jbd2_free_handle(handle); return err; } -/** - * int jbd2_journal_force_commit() - force any uncommitted transactions - * @journal: journal to force - * - * For synchronous operations: force any uncommitted transactions - * to disk. May seem kludgy, but it reuses all the handle batching - * code in a very simple manner. - */ -int jbd2_journal_force_commit(journal_t *journal) -{ - handle_t *handle; - int ret; - - handle = jbd2_journal_start(journal, 1); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - } else { - handle->h_sync = 1; - ret = jbd2_journal_stop(handle); - } - return ret; -} - /* * * List management code snippets: various functions for manipulating the @@ -1601,10 +1727,10 @@ __blist_del_buffer(struct journal_head **list, struct journal_head *jh) * Remove a buffer from the appropriate transaction list. * * Note that this function can *change* the value of - * bh->b_transaction->t_buffers, t_forget, t_iobuf_list, t_shadow_list, - * t_log_list or t_reserved_list. If the caller is holding onto a copy of one - * of these pointers, it could go bad. Generally the caller needs to re-read - * the pointer from the transaction_t. + * bh->b_transaction->t_buffers, t_forget, t_shadow_list or + * t_reserved_list. If the caller is holding onto a copy of one of these + * pointers, it could go bad. Generally the caller needs to re-read the + * pointer from the transaction_t. * * Called under j_list_lock. */ @@ -1634,15 +1760,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) case BJ_Forget: list = &transaction->t_forget; break; - case BJ_IO: - list = &transaction->t_iobuf_list; - break; case BJ_Shadow: list = &transaction->t_shadow_list; break; - case BJ_LogCtl: - list = &transaction->t_log_list; - break; case BJ_Reserved: list = &transaction->t_reserved_list; break; @@ -2034,18 +2154,23 @@ zap_buffer_unlocked: * void jbd2_journal_invalidatepage() * @journal: journal to use for flush... * @page: page to flush - * @offset: length of page to invalidate. + * @offset: start of the range to invalidate + * @length: length of the range to invalidate * - * Reap page buffers containing data after offset in page. Can return -EBUSY - * if buffers are part of the committing transaction and the page is straddling - * i_size. Caller then has to wait for current commit and try again. + * Reap page buffers containing data in the specified range of the page. + * Can return -EBUSY if buffers are part of the committing transaction and + * the page is straddling i_size. The caller then has to wait for the current + * commit and try again. 
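Under the new range semantics documented above, a buffer is reaped only when it lies wholly inside the invalidated range. An illustrative helper (buffer_in_range is a hypothetical name, mirroring the checks in jbd2_journal_invalidatepage()):

#include <linux/types.h>

/* Does a buffer spanning [curr_off, curr_off + size) lie wholly
 * inside the invalidated range [offset, offset + length)? */
static bool buffer_in_range(unsigned int curr_off, unsigned int size,
                            unsigned int offset, unsigned int length)
{
        return offset <= curr_off && curr_off + size <= offset + length;
}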
*/ int jbd2_journal_invalidatepage(journal_t *journal, struct page *page, - unsigned long offset) + unsigned int offset, + unsigned int length) { struct buffer_head *head, *bh, *next; + unsigned int stop = offset + length; unsigned int curr_off = 0; + int partial_page = (offset || length < PAGE_CACHE_SIZE); int may_free = 1; int ret = 0; @@ -2054,6 +2179,8 @@ int jbd2_journal_invalidatepage(journal_t *journal, if (!page_has_buffers(page)) return 0; + BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); + /* We will potentially be playing with lists other than just the * data lists (especially for journaled data mode), so be * cautious in our locking. */ @@ -2063,10 +2190,13 @@ int jbd2_journal_invalidatepage(journal_t *journal, unsigned int next_off = curr_off + bh->b_size; next = bh->b_this_page; + if (next_off > stop) + return 0; + if (offset <= curr_off) { /* This block is wholly outside the truncation point */ lock_buffer(bh); - ret = journal_unmap_buffer(journal, bh, offset > 0); + ret = journal_unmap_buffer(journal, bh, partial_page); unlock_buffer(bh); if (ret < 0) return ret; @@ -2077,7 +2207,7 @@ int jbd2_journal_invalidatepage(journal_t *journal, } while (bh != head); - if (!offset) { + if (!partial_page) { if (may_free && try_to_free_buffers(page)) J_ASSERT(!page_has_buffers(page)); } @@ -2138,15 +2268,9 @@ void __jbd2_journal_file_buffer(struct journal_head *jh, case BJ_Forget: list = &transaction->t_forget; break; - case BJ_IO: - list = &transaction->t_iobuf_list; - break; case BJ_Shadow: list = &transaction->t_shadow_list; break; - case BJ_LogCtl: - list = &transaction->t_log_list; - break; case BJ_Reserved: list = &transaction->t_reserved_list; break; @@ -2248,10 +2372,12 @@ void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh) int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode) { transaction_t *transaction = handle->h_transaction; - journal_t *journal = transaction->t_journal; + journal_t *journal; + WARN_ON(!transaction); if (is_handle_aborted(handle)) - return -EIO; + return -EROFS; + journal = transaction->t_journal; jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino, transaction->t_tid); diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index acd46a4160cb..e3aac222472e 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -22,7 +22,7 @@ #include <linux/time.h> #include "nodelist.h" -static int jffs2_readdir (struct file *, void *, filldir_t); +static int jffs2_readdir (struct file *, struct dir_context *); static int jffs2_create (struct inode *,struct dentry *,umode_t, bool); @@ -40,7 +40,7 @@ static int jffs2_rename (struct inode *, struct dentry *, const struct file_operations jffs2_dir_operations = { .read = generic_read_dir, - .readdir = jffs2_readdir, + .iterate = jffs2_readdir, .unlocked_ioctl=jffs2_ioctl, .fsync = jffs2_fsync, .llseek = generic_file_llseek, @@ -114,60 +114,40 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, /***********************************************************************/ -static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int jffs2_readdir(struct file *file, struct dir_context *ctx) { - struct jffs2_inode_info *f; - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); + struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_full_dirent *fd; - unsigned long offset, curofs; + unsigned long curofs = 1; - jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n", - file_inode(filp)->i_ino); + 
jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n", inode->i_ino); - f = JFFS2_INODE_INFO(inode); - - offset = filp->f_pos; - - if (offset == 0) { - jffs2_dbg(1, "Dirent 0: \".\", ino #%lu\n", inode->i_ino); - if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) - goto out; - offset++; - } - if (offset == 1) { - unsigned long pino = parent_ino(filp->f_path.dentry); - jffs2_dbg(1, "Dirent 1: \"..\", ino #%lu\n", pino); - if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0) - goto out; - offset++; - } + if (!dir_emit_dots(file, ctx)) + return 0; - curofs=1; mutex_lock(&f->sem); for (fd = f->dents; fd; fd = fd->next) { - curofs++; - /* First loop: curofs = 2; offset = 2 */ - if (curofs < offset) { + /* First loop: curofs = 2; pos = 2 */ + if (curofs < ctx->pos) { jffs2_dbg(2, "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", - fd->name, fd->ino, fd->type, curofs, offset); + fd->name, fd->ino, fd->type, curofs, (unsigned long)ctx->pos); continue; } if (!fd->ino) { jffs2_dbg(2, "Skipping deletion dirent \"%s\"\n", fd->name); - offset++; + ctx->pos++; continue; } jffs2_dbg(2, "Dirent %ld: \"%s\", ino #%u, type %d\n", - offset, fd->name, fd->ino, fd->type); - if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0) + (unsigned long)ctx->pos, fd->name, fd->ino, fd->type); + if (!dir_emit(ctx, fd->name, strlen(fd->name), fd->ino, fd->type)) break; - offset++; + ctx->pos++; } mutex_unlock(&f->sem); - out: - filp->f_pos = offset; return 0; } diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c index 0ddbeceafc62..9f4ed13d9f15 100644 --- a/fs/jfs/jfs_dtree.c +++ b/fs/jfs/jfs_dtree.c @@ -3002,9 +3002,9 @@ static inline struct jfs_dirent *next_jfs_dirent(struct jfs_dirent *dirent) * return: offset = (pn, index) of start entry * of next jfs_readdir()/dtRead() */ -int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) +int jfs_readdir(struct file *file, struct dir_context *ctx) { - struct inode *ip = file_inode(filp); + struct inode *ip = file_inode(file); struct nls_table *codepage = JFS_SBI(ip->i_sb)->nls_tab; int rc = 0; loff_t dtpos; /* legacy OS/2 style position */ @@ -3033,7 +3033,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) int overflow, fix_page, page_fixed = 0; static int unique_pos = 2; /* If we can't fix broken index */ - if (filp->f_pos == DIREND) + if (ctx->pos == DIREND) return 0; if (DO_INDEX(ip)) { @@ -3045,7 +3045,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) */ do_index = 1; - dir_index = (u32) filp->f_pos; + dir_index = (u32) ctx->pos; if (dir_index > 1) { struct dir_table_slot dirtab_slot; @@ -3053,25 +3053,25 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (dtEmpty(ip) || (dir_index >= JFS_IP(ip)->next_index)) { /* Stale position. 
Directory has shrunk */ - filp->f_pos = DIREND; + ctx->pos = DIREND; return 0; } repeat: rc = read_index(ip, dir_index, &dirtab_slot); if (rc) { - filp->f_pos = DIREND; + ctx->pos = DIREND; return rc; } if (dirtab_slot.flag == DIR_INDEX_FREE) { if (loop_count++ > JFS_IP(ip)->next_index) { jfs_err("jfs_readdir detected " "infinite loop!"); - filp->f_pos = DIREND; + ctx->pos = DIREND; return 0; } dir_index = le32_to_cpu(dirtab_slot.addr2); if (dir_index == -1) { - filp->f_pos = DIREND; + ctx->pos = DIREND; return 0; } goto repeat; @@ -3080,13 +3080,13 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) index = dirtab_slot.slot; DT_GETPAGE(ip, bn, mp, PSIZE, p, rc); if (rc) { - filp->f_pos = DIREND; + ctx->pos = DIREND; return 0; } if (p->header.flag & BT_INTERNAL) { jfs_err("jfs_readdir: bad index table"); DT_PUTPAGE(mp); - filp->f_pos = -1; + ctx->pos = -1; return 0; } } else { @@ -3094,23 +3094,22 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) /* * self "." */ - filp->f_pos = 0; - if (filldir(dirent, ".", 1, 0, ip->i_ino, - DT_DIR)) + ctx->pos = 0; + if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) return 0; } /* * parent ".." */ - filp->f_pos = 1; - if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR)) + ctx->pos = 1; + if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) return 0; /* * Find first entry of left-most leaf */ if (dtEmpty(ip)) { - filp->f_pos = DIREND; + ctx->pos = DIREND; return 0; } @@ -3128,23 +3127,19 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) * pn > 0: Real entries, pn=1 -> leftmost page * pn = index = -1: No more entries */ - dtpos = filp->f_pos; + dtpos = ctx->pos; if (dtpos == 0) { /* build "." entry */ - - if (filldir(dirent, ".", 1, filp->f_pos, ip->i_ino, - DT_DIR)) + if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) return 0; dtoffset->index = 1; - filp->f_pos = dtpos; + ctx->pos = dtpos; } if (dtoffset->pn == 0) { if (dtoffset->index == 1) { /* build ".." entry */ - - if (filldir(dirent, "..", 2, filp->f_pos, - PARENT(ip), DT_DIR)) + if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) return 0; } else { jfs_err("jfs_readdir called with " @@ -3152,18 +3147,18 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) } dtoffset->pn = 1; dtoffset->index = 0; - filp->f_pos = dtpos; + ctx->pos = dtpos; } if (dtEmpty(ip)) { - filp->f_pos = DIREND; + ctx->pos = DIREND; return 0; } - if ((rc = dtReadNext(ip, &filp->f_pos, &btstack))) { + if ((rc = dtReadNext(ip, &ctx->pos, &btstack))) { jfs_err("jfs_readdir: unexpected rc = %d " "from dtReadNext", rc); - filp->f_pos = DIREND; + ctx->pos = DIREND; return 0; } /* get start leaf page and index */ @@ -3171,7 +3166,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) /* offset beyond directory eof ? 
*/ if (bn < 0) { - filp->f_pos = DIREND; + ctx->pos = DIREND; return 0; } } @@ -3180,7 +3175,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (dirent_buf == 0) { DT_PUTPAGE(mp); jfs_warn("jfs_readdir: __get_free_page failed!"); - filp->f_pos = DIREND; + ctx->pos = DIREND; return -ENOMEM; } @@ -3295,9 +3290,9 @@ skip_one: jfs_dirent = (struct jfs_dirent *) dirent_buf; while (jfs_dirents--) { - filp->f_pos = jfs_dirent->position; - if (filldir(dirent, jfs_dirent->name, - jfs_dirent->name_len, filp->f_pos, + ctx->pos = jfs_dirent->position; + if (!dir_emit(ctx, jfs_dirent->name, + jfs_dirent->name_len, jfs_dirent->ino, DT_UNKNOWN)) goto out; jfs_dirent = next_jfs_dirent(jfs_dirent); @@ -3309,7 +3304,7 @@ skip_one: } if (!overflow && (bn == 0)) { - filp->f_pos = DIREND; + ctx->pos = DIREND; break; } diff --git a/fs/jfs/jfs_dtree.h b/fs/jfs/jfs_dtree.h index 2545bb317235..fd4169e6e698 100644 --- a/fs/jfs/jfs_dtree.h +++ b/fs/jfs/jfs_dtree.h @@ -265,5 +265,5 @@ extern int dtDelete(tid_t tid, struct inode *ip, struct component_name * key, extern int dtModify(tid_t tid, struct inode *ip, struct component_name * key, ino_t * orig_ino, ino_t new_ino, int flag); -extern int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir); +extern int jfs_readdir(struct file *file, struct dir_context *ctx); #endif /* !_H_JFS_DTREE */ diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 6740d34cd82b..9e3aaff11f89 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c @@ -571,9 +571,10 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask) return ret; } -static void metapage_invalidatepage(struct page *page, unsigned long offset) +static void metapage_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { - BUG_ON(offset); + BUG_ON(offset || length < PAGE_CACHE_SIZE); BUG_ON(PageWriteback(page)); diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 3b91a7ad6086..8b19027291d6 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -1529,7 +1529,7 @@ const struct inode_operations jfs_dir_inode_operations = { const struct file_operations jfs_dir_operations = { .read = generic_read_dir, - .readdir = jfs_readdir, + .iterate = jfs_readdir, .fsync = jfs_fsync, .unlocked_ioctl = jfs_ioctl, #ifdef CONFIG_COMPAT @@ -1538,8 +1538,7 @@ const struct file_operations jfs_dir_operations = { .llseek = generic_file_llseek, }; -static int jfs_ci_hash(const struct dentry *dir, const struct inode *inode, - struct qstr *this) +static int jfs_ci_hash(const struct dentry *dir, struct qstr *this) { unsigned long hash; int i; @@ -1552,9 +1551,7 @@ static int jfs_ci_hash(const struct dentry *dir, const struct inode *inode, return 0; } -static int jfs_ci_compare(const struct dentry *parent, - const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +static int jfs_ci_compare(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { int i, result = 1; diff --git a/fs/libfs.c b/fs/libfs.c index 916da8c4158b..c3a0837fb861 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -135,60 +135,40 @@ static inline unsigned char dt_type(struct inode *inode) * both impossible due to the lock on directory. 
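The readdir conversions throughout this series (jffs2 and jfs above, dcache_readdir below) all reduce to one shape. A hedged, self-contained sketch of the new ->iterate contract, with an invented name table and inode numbers:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* ctx->pos is bumped only after dir_emit() accepts an entry, so a
 * short getdents buffer resumes at the right place on the next call. */
static int example_iterate(struct file *file, struct dir_context *ctx)
{
        static const char * const names[] = { "foo", "bar" };
        unsigned long i;

        if (!dir_emit_dots(file, ctx))  /* emits "." and ".." as needed */
                return 0;
        for (i = ctx->pos - 2; i < ARRAY_SIZE(names); i++) {
                if (!dir_emit(ctx, names[i], strlen(names[i]),
                              i + 2 /* fake ino */, DT_REG))
                        return 0;
                ctx->pos++;
        }
        return 0;
}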
*/ -int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) +int dcache_readdir(struct file *file, struct dir_context *ctx) { - struct dentry *dentry = filp->f_path.dentry; - struct dentry *cursor = filp->private_data; + struct dentry *dentry = file->f_path.dentry; + struct dentry *cursor = file->private_data; struct list_head *p, *q = &cursor->d_u.d_child; - ino_t ino; - int i = filp->f_pos; - switch (i) { - case 0: - ino = dentry->d_inode->i_ino; - if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) - break; - filp->f_pos++; - i++; - /* fallthrough */ - case 1: - ino = parent_ino(dentry); - if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0) - break; - filp->f_pos++; - i++; - /* fallthrough */ - default: - spin_lock(&dentry->d_lock); - if (filp->f_pos == 2) - list_move(q, &dentry->d_subdirs); - - for (p=q->next; p != &dentry->d_subdirs; p=p->next) { - struct dentry *next; - next = list_entry(p, struct dentry, d_u.d_child); - spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); - if (!simple_positive(next)) { - spin_unlock(&next->d_lock); - continue; - } + if (!dir_emit_dots(file, ctx)) + return 0; + spin_lock(&dentry->d_lock); + if (ctx->pos == 2) + list_move(q, &dentry->d_subdirs); + + for (p = q->next; p != &dentry->d_subdirs; p = p->next) { + struct dentry *next = list_entry(p, struct dentry, d_u.d_child); + spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); + if (!simple_positive(next)) { + spin_unlock(&next->d_lock); + continue; + } - spin_unlock(&next->d_lock); - spin_unlock(&dentry->d_lock); - if (filldir(dirent, next->d_name.name, - next->d_name.len, filp->f_pos, - next->d_inode->i_ino, - dt_type(next->d_inode)) < 0) - return 0; - spin_lock(&dentry->d_lock); - spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); - /* next is still alive */ - list_move(q, p); - spin_unlock(&next->d_lock); - p = q; - filp->f_pos++; - } - spin_unlock(&dentry->d_lock); + spin_unlock(&next->d_lock); + spin_unlock(&dentry->d_lock); + if (!dir_emit(ctx, next->d_name.name, next->d_name.len, + next->d_inode->i_ino, dt_type(next->d_inode))) + return 0; + spin_lock(&dentry->d_lock); + spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); + /* next is still alive */ + list_move(q, p); + spin_unlock(&next->d_lock); + p = q; + ctx->pos++; } + spin_unlock(&dentry->d_lock); return 0; } @@ -202,7 +182,7 @@ const struct file_operations simple_dir_operations = { .release = dcache_dir_close, .llseek = dcache_dir_lseek, .read = generic_read_dir, - .readdir = dcache_readdir, + .iterate = dcache_readdir, .fsync = noop_fsync, }; diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index a2aa97d45670..10d6c41aecad 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -305,7 +305,7 @@ static int lockd_start_svc(struct svc_serv *serv) svc_sock_update_bufs(serv); serv->sv_maxconn = nlm_max_connections; - nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name); + nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name); if (IS_ERR(nlmsvc_task)) { error = PTR_ERR(nlmsvc_task); printk(KERN_WARNING diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index e703318c41df..067778b0ccc9 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -276,7 +276,7 @@ static int nlmsvc_unlink_block(struct nlm_block *block) dprintk("lockd: unlinking block %p...\n", block); /* Remove block from list */ - status = posix_unblock_lock(block->b_file->f_file, &block->b_call->a_args.lock.fl); + status = posix_unblock_lock(&block->b_call->a_args.lock.fl); nlmsvc_remove_block(block); return status; } @@ -744,8 +744,20 @@ 
static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2) return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid; } +/* + * Since NLM uses two "keys" for tracking locks, we need to hash them down + * to one for the blocked_hash. Here, we're just xor'ing the host address + * with the pid in order to create a key value for picking a hash bucket. + */ +static unsigned long +nlmsvc_owner_key(struct file_lock *fl) +{ + return (unsigned long)fl->fl_owner ^ (unsigned long)fl->fl_pid; +} + const struct lock_manager_operations nlmsvc_lock_operations = { .lm_compare_owner = nlmsvc_same_owner, + .lm_owner_key = nlmsvc_owner_key, .lm_notify = nlmsvc_notify_blocked, .lm_grant = nlmsvc_grant_deferred, }; diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index 97e87415b145..dc5c75930f0f 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -169,7 +169,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file, again: file->f_locks = 0; - lock_flocks(); /* protects i_flock list */ + spin_lock(&inode->i_lock); for (fl = inode->i_flock; fl; fl = fl->fl_next) { if (fl->fl_lmops != &nlmsvc_lock_operations) continue; @@ -181,7 +181,7 @@ again: if (match(lockhost, host)) { struct file_lock lock = *fl; - unlock_flocks(); + spin_unlock(&inode->i_lock); lock.fl_type = F_UNLCK; lock.fl_start = 0; lock.fl_end = OFFSET_MAX; @@ -193,7 +193,7 @@ again: goto again; } } - unlock_flocks(); + spin_unlock(&inode->i_lock); return 0; } @@ -228,14 +228,14 @@ nlm_file_inuse(struct nlm_file *file) if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares) return 1; - lock_flocks(); + spin_lock(&inode->i_lock); for (fl = inode->i_flock; fl; fl = fl->fl_next) { if (fl->fl_lmops == &nlmsvc_lock_operations) { - unlock_flocks(); + spin_unlock(&inode->i_lock); return 1; } } - unlock_flocks(); + spin_unlock(&inode->i_lock); file->f_locks = 0; return 0; } diff --git a/fs/locks.c b/fs/locks.c index cb424a4fed71..04e2c1fdb157 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -126,6 +126,7 @@ #include <linux/time.h> #include <linux/rcupdate.h> #include <linux/pid_namespace.h> +#include <linux/hashtable.h> #include <asm/uaccess.h> @@ -153,30 +154,51 @@ int lease_break_time = 45; #define for_each_lock(inode, lockp) \ for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next) -static LIST_HEAD(file_lock_list); -static LIST_HEAD(blocked_list); +/* + * The global file_lock_list is only used for displaying /proc/locks. Protected + * by the file_lock_lock. + */ +static HLIST_HEAD(file_lock_list); static DEFINE_SPINLOCK(file_lock_lock); /* - * Protects the two list heads above, plus the inode->i_flock list + * The blocked_hash is used to find POSIX lock loops for deadlock detection. + * It is protected by blocked_lock_lock. + * + * We hash locks by lockowner in order to optimize searching for the lock a + * particular lockowner is waiting on. + * + * FIXME: make this value scale via some heuristic? We generally will want more + * buckets when we have more lockowners holding locks, but that's a little + * difficult to determine without knowing what the workload will look like. */ -void lock_flocks(void) -{ - spin_lock(&file_lock_lock); -} -EXPORT_SYMBOL_GPL(lock_flocks); +#define BLOCKED_HASH_BITS 7 +static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS); -void unlock_flocks(void) -{ - spin_unlock(&file_lock_lock); -} -EXPORT_SYMBOL_GPL(unlock_flocks); +/* + * This lock protects the blocked_hash. Generally, if you're accessing it, you + * want to be holding this lock. 
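To illustrate the <linux/hashtable.h> API that the blocked_hash adopts, here is a sketch with invented names (example_*); posix_owner_key() and posix_same_owner() are fs/locks.c's own static helpers, so code like this could only live there:

#include <linux/fs.h>
#include <linux/hashtable.h>

static DEFINE_HASHTABLE(example_blocked, 7);

/* Add a waiter keyed by lock owner. */
static void example_add_waiter(struct file_lock *waiter)
{
        hash_add(example_blocked, &waiter->fl_link, posix_owner_key(waiter));
}

/* Find the lock a given owner is blocked on; the deadlock detector
 * follows such edges owner by owner looking for a cycle. */
static struct file_lock *example_owner_waits_for(struct file_lock *fl)
{
        struct file_lock *cur;

        hash_for_each_possible(example_blocked, cur, fl_link,
                               posix_owner_key(fl)) {
                if (posix_same_owner(cur, fl))
                        return cur->fl_next;    /* the blocking lock */
        }
        return NULL;
}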
+ * + * In addition, it also protects the fl->fl_block list, and the fl->fl_next + * pointer for file_lock structures that are acting as lock requests (in + * contrast to those that are acting as records of acquired locks). + * + * Note that when we acquire this lock in order to change the above fields, + * we often hold the i_lock as well. In certain cases, when reading the fields + * protected by this lock, we can skip acquiring it iff we already hold the + * i_lock. + * + * In particular, adding an entry to the fl_block list requires that you hold + * both the i_lock and the blocked_lock_lock (acquired in that order). Deleting + * an entry from the list however only requires the blocked_lock_lock. + */ +static DEFINE_SPINLOCK(blocked_lock_lock); static struct kmem_cache *filelock_cache __read_mostly; static void locks_init_lock_heads(struct file_lock *fl) { - INIT_LIST_HEAD(&fl->fl_link); + INIT_HLIST_NODE(&fl->fl_link); INIT_LIST_HEAD(&fl->fl_block); init_waitqueue_head(&fl->fl_wait); } @@ -210,7 +232,7 @@ void locks_free_lock(struct file_lock *fl) { BUG_ON(waitqueue_active(&fl->fl_wait)); BUG_ON(!list_empty(&fl->fl_block)); - BUG_ON(!list_empty(&fl->fl_link)); + BUG_ON(!hlist_unhashed(&fl->fl_link)); locks_release_private(fl); kmem_cache_free(filelock_cache, fl); @@ -484,47 +506,108 @@ static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2) return fl1->fl_owner == fl2->fl_owner; } +static inline void +locks_insert_global_locks(struct file_lock *fl) +{ + spin_lock(&file_lock_lock); + hlist_add_head(&fl->fl_link, &file_lock_list); + spin_unlock(&file_lock_lock); +} + +static inline void +locks_delete_global_locks(struct file_lock *fl) +{ + spin_lock(&file_lock_lock); + hlist_del_init(&fl->fl_link); + spin_unlock(&file_lock_lock); +} + +static unsigned long +posix_owner_key(struct file_lock *fl) +{ + if (fl->fl_lmops && fl->fl_lmops->lm_owner_key) + return fl->fl_lmops->lm_owner_key(fl); + return (unsigned long)fl->fl_owner; +} + +static inline void +locks_insert_global_blocked(struct file_lock *waiter) +{ + hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter)); +} + +static inline void +locks_delete_global_blocked(struct file_lock *waiter) +{ + hash_del(&waiter->fl_link); +} + /* Remove waiter from blocker's block list. * When blocker ends up pointing to itself then the list is empty. + * + * Must be called with blocked_lock_lock held. */ static void __locks_delete_block(struct file_lock *waiter) { + locks_delete_global_blocked(waiter); list_del_init(&waiter->fl_block); - list_del_init(&waiter->fl_link); waiter->fl_next = NULL; } -/* - */ -void locks_delete_block(struct file_lock *waiter) +static void locks_delete_block(struct file_lock *waiter) { - lock_flocks(); + spin_lock(&blocked_lock_lock); __locks_delete_block(waiter); - unlock_flocks(); + spin_unlock(&blocked_lock_lock); } -EXPORT_SYMBOL(locks_delete_block); /* Insert waiter into blocker's block list. * We use a circular list so that processes can be easily woken up in * the order they blocked. The documentation doesn't require this but * it seems like the reasonable thing to do. + * + * Must be called with both the i_lock and blocked_lock_lock held. The fl_block + * list itself is protected by the blocked_lock_lock, but by ensuring that the + * i_lock is also held on insertions we can avoid taking the blocked_lock_lock + * in some cases when we see that the fl_block list is empty. 
*/ -static void locks_insert_block(struct file_lock *blocker, - struct file_lock *waiter) +static void __locks_insert_block(struct file_lock *blocker, + struct file_lock *waiter) { BUG_ON(!list_empty(&waiter->fl_block)); - list_add_tail(&waiter->fl_block, &blocker->fl_block); waiter->fl_next = blocker; + list_add_tail(&waiter->fl_block, &blocker->fl_block); if (IS_POSIX(blocker)) - list_add(&waiter->fl_link, &blocked_list); + locks_insert_global_blocked(waiter); } -/* Wake up processes blocked waiting for blocker. - * If told to wait then schedule the processes until the block list - * is empty, otherwise empty the block list ourselves. +/* Must be called with i_lock held. */ +static void locks_insert_block(struct file_lock *blocker, + struct file_lock *waiter) +{ + spin_lock(&blocked_lock_lock); + __locks_insert_block(blocker, waiter); + spin_unlock(&blocked_lock_lock); +} + +/* + * Wake up processes blocked waiting for blocker. + * + * Must be called with the inode->i_lock held! */ static void locks_wake_up_blocks(struct file_lock *blocker) { + /* + * Avoid taking global lock if list is empty. This is safe since new + * blocked requests are only added to the list under the i_lock, and + * the i_lock is always held here. Note that removal from the fl_block + * list does not require the i_lock, so we must recheck list_empty() + * after acquiring the blocked_lock_lock. + */ + if (list_empty(&blocker->fl_block)) + return; + + spin_lock(&blocked_lock_lock); while (!list_empty(&blocker->fl_block)) { struct file_lock *waiter; @@ -536,20 +619,23 @@ static void locks_wake_up_blocks(struct file_lock *blocker) else wake_up(&waiter->fl_wait); } + spin_unlock(&blocked_lock_lock); } /* Insert file lock fl into an inode's lock list at the position indicated * by pos. At the same time add the lock to the global file lock list. + * + * Must be called with the i_lock held! */ static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl) { - list_add(&fl->fl_link, &file_lock_list); - fl->fl_nspid = get_pid(task_tgid(current)); /* insert into file's list */ fl->fl_next = *pos; *pos = fl; + + locks_insert_global_locks(fl); } /* @@ -557,14 +643,17 @@ static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl) * Wake up processes that are blocked waiting for this lock, * notify the FS that the lock has been cleared and * finally free the lock. + * + * Must be called with the i_lock held! 
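A hedged sketch making the documented lock ordering explicit; example_block_waiter is an invented wrapper, and blocked_lock_lock and __locks_insert_block() are fs/locks.c statics:

#include <linux/fs.h>
#include <linux/spinlock.h>

static void example_block_waiter(struct inode *inode,
                                 struct file_lock *blocker,
                                 struct file_lock *waiter)
{
        spin_lock(&inode->i_lock);      /* first: per-inode lock */
        spin_lock(&blocked_lock_lock);  /* then: global blocked lock */
        __locks_insert_block(blocker, waiter);
        spin_unlock(&blocked_lock_lock);
        spin_unlock(&inode->i_lock);
}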
*/ static void locks_delete_lock(struct file_lock **thisfl_p) { struct file_lock *fl = *thisfl_p; + locks_delete_global_locks(fl); + *thisfl_p = fl->fl_next; fl->fl_next = NULL; - list_del_init(&fl->fl_link); if (fl->fl_nspid) { put_pid(fl->fl_nspid); @@ -625,8 +714,9 @@ void posix_test_lock(struct file *filp, struct file_lock *fl) { struct file_lock *cfl; + struct inode *inode = file_inode(filp); - lock_flocks(); + spin_lock(&inode->i_lock); for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) { if (!IS_POSIX(cfl)) continue; @@ -639,7 +729,7 @@ posix_test_lock(struct file *filp, struct file_lock *fl) fl->fl_pid = pid_vnr(cfl->fl_nspid); } else fl->fl_type = F_UNLCK; - unlock_flocks(); + spin_unlock(&inode->i_lock); return; } EXPORT_SYMBOL(posix_test_lock); @@ -676,13 +766,14 @@ static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl) { struct file_lock *fl; - list_for_each_entry(fl, &blocked_list, fl_link) { + hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) { if (posix_same_owner(fl, block_fl)) return fl->fl_next; } return NULL; } +/* Must be called with the blocked_lock_lock held! */ static int posix_locks_deadlock(struct file_lock *caller_fl, struct file_lock *block_fl) { @@ -718,7 +809,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request) return -ENOMEM; } - lock_flocks(); + spin_lock(&inode->i_lock); if (request->fl_flags & FL_ACCESS) goto find_conflict; @@ -748,9 +839,9 @@ static int flock_lock_file(struct file *filp, struct file_lock *request) * give it the opportunity to lock the file. */ if (found) { - unlock_flocks(); + spin_unlock(&inode->i_lock); cond_resched(); - lock_flocks(); + spin_lock(&inode->i_lock); } find_conflict: @@ -777,7 +868,7 @@ find_conflict: error = 0; out: - unlock_flocks(); + spin_unlock(&inode->i_lock); if (new_fl) locks_free_lock(new_fl); return error; @@ -791,7 +882,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str struct file_lock *left = NULL; struct file_lock *right = NULL; struct file_lock **before; - int error, added = 0; + int error; + bool added = false; /* * We may need two file_lock structures for this operation, @@ -806,7 +898,12 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str new_fl2 = locks_alloc_lock(); } - lock_flocks(); + spin_lock(&inode->i_lock); + /* + * New lock request. Walk all POSIX locks and look for conflicts. If + * there are any, either return error or put the request on the + * blocker's list of waiters and the global blocked_hash. + */ if (request->fl_type != F_UNLCK) { for_each_lock(inode, before) { fl = *before; @@ -819,11 +916,17 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str error = -EAGAIN; if (!(request->fl_flags & FL_SLEEP)) goto out; + /* + * Deadlock detection and insertion into the blocked + * locks list must be done while holding the same lock! + */ error = -EDEADLK; - if (posix_locks_deadlock(request, fl)) - goto out; - error = FILE_LOCK_DEFERRED; - locks_insert_block(fl, request); + spin_lock(&blocked_lock_lock); + if (likely(!posix_locks_deadlock(request, fl))) { + error = FILE_LOCK_DEFERRED; + __locks_insert_block(fl, request); + } + spin_unlock(&blocked_lock_lock); goto out; } } @@ -845,7 +948,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str before = &fl->fl_next; } - /* Process locks with this owner. */ + /* Process locks with this owner. 
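Because POSIX lock ranges are inclusive [fl_start, fl_end], the "adjacent or overlapping" test in the owner walk that follows reduces to a one-liner. An illustrative helper that ignores overflow at the OFFSET_MAX boundary, which the real code must handle:

#include <linux/types.h>

/* True if inclusive ranges [s1, e1] and [s2, e2] overlap or abut. */
static bool locks_touch(loff_t s1, loff_t e1, loff_t s2, loff_t e2)
{
        return s1 <= e2 + 1 && s2 <= e1 + 1;
}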
*/ while ((fl = *before) && posix_same_owner(request, fl)) { /* Detect adjacent or overlapping regions (if same lock type) */ @@ -880,7 +983,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str continue; } request = fl; - added = 1; + added = true; } else { /* Processing for different lock types is a bit @@ -891,7 +994,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str if (fl->fl_start > request->fl_end) break; if (request->fl_type == F_UNLCK) - added = 1; + added = true; if (fl->fl_start < request->fl_start) left = fl; /* If the next lock in the list has a higher end @@ -921,7 +1024,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str locks_release_private(fl); locks_copy_private(fl, request); request = fl; - added = 1; + added = true; } } /* Go on to next lock. @@ -931,10 +1034,9 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str } /* - * The above code only modifies existing locks in case of - * merging or replacing. If new lock(s) need to be inserted - * all modifications are done bellow this, so it's safe yet to - * bail out. + * The above code only modifies existing locks in case of merging or + * replacing. If new lock(s) need to be inserted all modifications are + * done below this, so it's safe yet to bail out. */ error = -ENOLCK; /* "no luck" */ if (right && left == right && !new_fl2) @@ -974,7 +1076,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str locks_wake_up_blocks(left); } out: - unlock_flocks(); + spin_unlock(&inode->i_lock); /* * Free any unused locks. */ @@ -1049,14 +1151,14 @@ int locks_mandatory_locked(struct inode *inode) /* * Search the lock list for this inode for any POSIX locks. */ - lock_flocks(); + spin_lock(&inode->i_lock); for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { if (!IS_POSIX(fl)) continue; if (fl->fl_owner != owner) break; } - unlock_flocks(); + spin_unlock(&inode->i_lock); return fl ? 
-EAGAIN : 0; } @@ -1199,7 +1301,7 @@ int __break_lease(struct inode *inode, unsigned int mode) if (IS_ERR(new_fl)) return PTR_ERR(new_fl); - lock_flocks(); + spin_lock(&inode->i_lock); time_out_leases(inode); @@ -1249,11 +1351,11 @@ restart: break_time++; } locks_insert_block(flock, new_fl); - unlock_flocks(); + spin_unlock(&inode->i_lock); error = wait_event_interruptible_timeout(new_fl->fl_wait, !new_fl->fl_next, break_time); - lock_flocks(); - __locks_delete_block(new_fl); + spin_lock(&inode->i_lock); + locks_delete_block(new_fl); if (error >= 0) { if (error == 0) time_out_leases(inode); @@ -1270,7 +1372,7 @@ restart: } out: - unlock_flocks(); + spin_unlock(&inode->i_lock); locks_free_lock(new_fl); return error; } @@ -1323,9 +1425,10 @@ EXPORT_SYMBOL(lease_get_mtime); int fcntl_getlease(struct file *filp) { struct file_lock *fl; + struct inode *inode = file_inode(filp); int type = F_UNLCK; - lock_flocks(); + spin_lock(&inode->i_lock); time_out_leases(file_inode(filp)); for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl); fl = fl->fl_next) { @@ -1334,11 +1437,11 @@ int fcntl_getlease(struct file *filp) break; } } - unlock_flocks(); + spin_unlock(&inode->i_lock); return type; } -int generic_add_lease(struct file *filp, long arg, struct file_lock **flp) +static int generic_add_lease(struct file *filp, long arg, struct file_lock **flp) { struct file_lock *fl, **before, **my_before = NULL, *lease; struct dentry *dentry = filp->f_path.dentry; @@ -1403,7 +1506,7 @@ out: return error; } -int generic_delete_lease(struct file *filp, struct file_lock **flp) +static int generic_delete_lease(struct file *filp, struct file_lock **flp) { struct file_lock *fl, **before; struct dentry *dentry = filp->f_path.dentry; @@ -1428,7 +1531,7 @@ int generic_delete_lease(struct file *filp, struct file_lock **flp) * The (input) flp->fl_lmops->lm_break function is required * by break_lease(). * - * Called with file_lock_lock held. + * Called with inode->i_lock held. 
*/ int generic_setlease(struct file *filp, long arg, struct file_lock **flp) { @@ -1497,11 +1600,12 @@ static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease) int vfs_setlease(struct file *filp, long arg, struct file_lock **lease) { + struct inode *inode = file_inode(filp); int error; - lock_flocks(); + spin_lock(&inode->i_lock); error = __vfs_setlease(filp, arg, lease); - unlock_flocks(); + spin_unlock(&inode->i_lock); return error; } @@ -1519,6 +1623,7 @@ static int do_fcntl_delete_lease(struct file *filp) static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) { struct file_lock *fl, *ret; + struct inode *inode = file_inode(filp); struct fasync_struct *new; int error; @@ -1532,10 +1637,10 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) return -ENOMEM; } ret = fl; - lock_flocks(); + spin_lock(&inode->i_lock); error = __vfs_setlease(filp, arg, &ret); if (error) { - unlock_flocks(); + spin_unlock(&inode->i_lock); locks_free_lock(fl); goto out_free_fasync; } @@ -1552,7 +1657,7 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) new = NULL; error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); - unlock_flocks(); + spin_unlock(&inode->i_lock); out_free_fasync: if (new) @@ -2076,7 +2181,7 @@ void locks_remove_flock(struct file *filp) fl.fl_ops->fl_release_private(&fl); } - lock_flocks(); + spin_lock(&inode->i_lock); before = &inode->i_flock; while ((fl = *before) != NULL) { @@ -2094,30 +2199,28 @@ void locks_remove_flock(struct file *filp) } before = &fl->fl_next; } - unlock_flocks(); + spin_unlock(&inode->i_lock); } /** * posix_unblock_lock - stop waiting for a file lock - * @filp: how the file was opened * @waiter: the lock which was waiting * * lockd needs to block waiting for locks. 
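Given the signature change documented above, a lockd-style cancellation now needs only the waiter. A minimal sketch with an invented name:

#include <linux/fs.h>
#include <linux/printk.h>

static int example_cancel_blocked(struct file_lock *waiter)
{
        int status = posix_unblock_lock(waiter);

        /* -ENOENT means the wait had already been woken, for instance
         * because the lock was granted in the meantime. */
        if (status == -ENOENT)
                pr_debug("block already removed, treating as a race\n");
        return status;
}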
*/ int -posix_unblock_lock(struct file *filp, struct file_lock *waiter) +posix_unblock_lock(struct file_lock *waiter) { int status = 0; - lock_flocks(); + spin_lock(&blocked_lock_lock); if (waiter->fl_next) __locks_delete_block(waiter); else status = -ENOENT; - unlock_flocks(); + spin_unlock(&blocked_lock_lock); return status; } - EXPORT_SYMBOL(posix_unblock_lock); /** @@ -2215,7 +2318,7 @@ static int locks_show(struct seq_file *f, void *v) { struct file_lock *fl, *bfl; - fl = list_entry(v, struct file_lock, fl_link); + fl = hlist_entry(v, struct file_lock, fl_link); lock_get_status(f, fl, *((loff_t *)f->private), ""); @@ -2229,21 +2332,23 @@ static void *locks_start(struct seq_file *f, loff_t *pos) { loff_t *p = f->private; - lock_flocks(); + spin_lock(&file_lock_lock); + spin_lock(&blocked_lock_lock); *p = (*pos + 1); - return seq_list_start(&file_lock_list, *pos); + return seq_hlist_start(&file_lock_list, *pos); } static void *locks_next(struct seq_file *f, void *v, loff_t *pos) { loff_t *p = f->private; ++*p; - return seq_list_next(v, &file_lock_list, pos); + return seq_hlist_next(v, &file_lock_list, pos); } static void locks_stop(struct seq_file *f, void *v) { - unlock_flocks(); + spin_unlock(&blocked_lock_lock); + spin_unlock(&file_lock_lock); } static const struct seq_operations locks_seq_operations = { @@ -2290,7 +2395,8 @@ int lock_may_read(struct inode *inode, loff_t start, unsigned long len) { struct file_lock *fl; int result = 1; - lock_flocks(); + + spin_lock(&inode->i_lock); for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { if (IS_POSIX(fl)) { if (fl->fl_type == F_RDLCK) @@ -2307,7 +2413,7 @@ int lock_may_read(struct inode *inode, loff_t start, unsigned long len) result = 0; break; } - unlock_flocks(); + spin_unlock(&inode->i_lock); return result; } @@ -2330,7 +2436,8 @@ int lock_may_write(struct inode *inode, loff_t start, unsigned long len) { struct file_lock *fl; int result = 1; - lock_flocks(); + + spin_lock(&inode->i_lock); for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { if (IS_POSIX(fl)) { if ((fl->fl_end < start) || (fl->fl_start > (start + len))) @@ -2345,7 +2452,7 @@ int lock_may_write(struct inode *inode, loff_t start, unsigned long len) result = 0; break; } - unlock_flocks(); + spin_unlock(&inode->i_lock); return result; } diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c index b82751082112..6bdc347008f5 100644 --- a/fs/logfs/dir.c +++ b/fs/logfs/dir.c @@ -281,17 +281,23 @@ static int logfs_rmdir(struct inode *dir, struct dentry *dentry) /* FIXME: readdir currently has it's own dir_walk code. 
I don't see a good * way to combine the two copies */ -#define IMPLICIT_NODES 2 -static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir) +static int logfs_readdir(struct file *file, struct dir_context *ctx) { struct inode *dir = file_inode(file); - loff_t pos = file->f_pos - IMPLICIT_NODES; + loff_t pos; struct page *page; struct logfs_disk_dentry *dd; - int full; + if (ctx->pos < 0) + return -EINVAL; + + if (!dir_emit_dots(file, ctx)) + return 0; + + pos = ctx->pos - 2; BUG_ON(pos < 0); - for (;; pos++) { + for (;; pos++, ctx->pos++) { + bool full; if (beyond_eof(dir, pos)) break; if (!logfs_exist_block(dir, pos)) { @@ -306,42 +312,17 @@ static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir) dd = kmap(page); BUG_ON(dd->namelen == 0); - full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen), - pos, be64_to_cpu(dd->ino), dd->type); + full = !dir_emit(ctx, (char *)dd->name, + be16_to_cpu(dd->namelen), + be64_to_cpu(dd->ino), dd->type); kunmap(page); page_cache_release(page); if (full) break; } - - file->f_pos = pos + IMPLICIT_NODES; return 0; } -static int logfs_readdir(struct file *file, void *buf, filldir_t filldir) -{ - struct inode *inode = file_inode(file); - ino_t pino = parent_ino(file->f_dentry); - int err; - - if (file->f_pos < 0) - return -EINVAL; - - if (file->f_pos == 0) { - if (filldir(buf, ".", 1, 1, inode->i_ino, DT_DIR) < 0) - return 0; - file->f_pos++; - } - if (file->f_pos == 1) { - if (filldir(buf, "..", 2, 2, pino, DT_DIR) < 0) - return 0; - file->f_pos++; - } - - err = __logfs_readdir(file, buf, filldir); - return err; -} - static void logfs_set_name(struct logfs_disk_dentry *dd, struct qstr *name) { dd->namelen = cpu_to_be16(name->len); @@ -814,7 +795,7 @@ const struct inode_operations logfs_dir_iops = { const struct file_operations logfs_dir_fops = { .fsync = logfs_fsync, .unlocked_ioctl = logfs_ioctl, - .readdir = logfs_readdir, + .iterate = logfs_readdir, .read = generic_read_dir, .llseek = default_llseek, }; diff --git a/fs/logfs/file.c b/fs/logfs/file.c index c2219a6dd3c8..57914fc32b62 100644 --- a/fs/logfs/file.c +++ b/fs/logfs/file.c @@ -159,7 +159,8 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc) return __logfs_writepage(page); } -static void logfs_invalidatepage(struct page *page, unsigned long offset) +static void logfs_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { struct logfs_block *block = logfs_block(page); diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c index 038da0991794..d448a777166b 100644 --- a/fs/logfs/segment.c +++ b/fs/logfs/segment.c @@ -884,7 +884,8 @@ static struct logfs_area *alloc_area(struct super_block *sb) return area; } -static void map_invalidatepage(struct page *page, unsigned long l) +static void map_invalidatepage(struct page *page, unsigned int o, + unsigned int l) { return; } diff --git a/fs/minix/dir.c b/fs/minix/dir.c index a9ed6f36e6ea..dfaf6fa9b7b5 100644 --- a/fs/minix/dir.c +++ b/fs/minix/dir.c @@ -16,12 +16,12 @@ typedef struct minix_dir_entry minix_dirent; typedef struct minix3_dir_entry minix3_dirent; -static int minix_readdir(struct file *, void *, filldir_t); +static int minix_readdir(struct file *, struct dir_context *); const struct file_operations minix_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = minix_readdir, + .iterate = minix_readdir, .fsync = generic_file_fsync, }; @@ -82,22 +82,23 @@ static inline void *minix_next_entry(void *de, struct minix_sb_info 
*sbi) return (void*)((char*)de + sbi->s_dirsize); } -static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir) +static int minix_readdir(struct file *file, struct dir_context *ctx) { - unsigned long pos = filp->f_pos; - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; - unsigned offset = pos & ~PAGE_CACHE_MASK; - unsigned long n = pos >> PAGE_CACHE_SHIFT; - unsigned long npages = dir_pages(inode); struct minix_sb_info *sbi = minix_sb(sb); unsigned chunk_size = sbi->s_dirsize; - char *name; - __u32 inumber; + unsigned long npages = dir_pages(inode); + unsigned long pos = ctx->pos; + unsigned offset; + unsigned long n; - pos = (pos + chunk_size-1) & ~(chunk_size-1); + ctx->pos = pos = ALIGN(pos, chunk_size); if (pos >= inode->i_size) - goto done; + return 0; + + offset = pos & ~PAGE_CACHE_MASK; + n = pos >> PAGE_CACHE_SHIFT; for ( ; n < npages; n++, offset = 0) { char *p, *kaddr, *limit; @@ -109,6 +110,8 @@ static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir) p = kaddr+offset; limit = kaddr + minix_last_byte(inode, n) - chunk_size; for ( ; p <= limit; p = minix_next_entry(p, sbi)) { + const char *name; + __u32 inumber; if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)p; name = de3->name; @@ -119,24 +122,17 @@ static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir) inumber = de->inode; } if (inumber) { - int over; - unsigned l = strnlen(name, sbi->s_namelen); - offset = p - kaddr; - over = filldir(dirent, name, l, - (n << PAGE_CACHE_SHIFT) | offset, - inumber, DT_UNKNOWN); - if (over) { + if (!dir_emit(ctx, name, l, + inumber, DT_UNKNOWN)) { dir_put_page(page); - goto done; + return 0; } } + ctx->pos += chunk_size; } dir_put_page(page); } - -done: - filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset; return 0; } diff --git a/fs/minix/namei.c b/fs/minix/namei.c index 0db73d9dd668..cd950e2331b6 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -54,6 +54,18 @@ static int minix_mknod(struct inode * dir, struct dentry *dentry, umode_t mode, return error; } +static int minix_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) +{ + int error; + struct inode *inode = minix_new_inode(dir, mode, &error); + if (inode) { + minix_set_inode(inode, 0); + mark_inode_dirty(inode); + d_tmpfile(dentry, inode); + } + return error; +} + static int minix_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { @@ -254,4 +266,5 @@ const struct inode_operations minix_dir_inode_operations = { .mknod = minix_mknod, .rename = minix_rename, .getattr = minix_getattr, + .tmpfile = minix_tmpfile, }; diff --git a/fs/namei.c b/fs/namei.c index 85e40d1c0a8f..b2beee7a733f 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1352,7 +1352,7 @@ static int lookup_fast(struct nameidata *nd, */ if (nd->flags & LOOKUP_RCU) { unsigned seq; - dentry = __d_lookup_rcu(parent, &nd->last, &seq, nd->inode); + dentry = __d_lookup_rcu(parent, &nd->last, &seq); if (!dentry) goto unlazy; @@ -1787,8 +1787,7 @@ static int link_path_walk(const char *name, struct nameidata *nd) struct dentry *parent = nd->path.dentry; nd->flags &= ~LOOKUP_JUMPED; if (unlikely(parent->d_flags & DCACHE_OP_HASH)) { - err = parent->d_op->d_hash(parent, nd->inode, - &this); + err = parent->d_op->d_hash(parent, &this); if (err < 0) break; } @@ -1976,7 +1975,7 @@ static int path_lookupat(int dfd, const char *name, err = complete_walk(nd); if (!err && nd->flags & LOOKUP_DIRECTORY) { - if 
(!nd->inode->i_op->lookup) { + if (!can_lookup(nd->inode)) { path_put(&nd->path); err = -ENOTDIR; } @@ -2121,7 +2120,7 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) * to use its own hash.. */ if (base->d_flags & DCACHE_OP_HASH) { - int err = base->d_op->d_hash(base, base->d_inode, &this); + int err = base->d_op->d_hash(base, &this); if (err < 0) return ERR_PTR(err); } @@ -2690,28 +2689,10 @@ static int do_last(struct nameidata *nd, struct path *path, nd->flags &= ~LOOKUP_PARENT; nd->flags |= op->intent; - switch (nd->last_type) { - case LAST_DOTDOT: - case LAST_DOT: + if (nd->last_type != LAST_NORM) { error = handle_dots(nd, nd->last_type); if (error) return error; - /* fallthrough */ - case LAST_ROOT: - error = complete_walk(nd); - if (error) - return error; - audit_inode(name, nd->path.dentry, 0); - if (open_flag & O_CREAT) { - error = -EISDIR; - goto out; - } - goto finish_open; - case LAST_BIND: - error = complete_walk(nd); - if (error) - return error; - audit_inode(name, dir, 0); goto finish_open; } @@ -2841,19 +2822,19 @@ finish_lookup: } nd->inode = inode; /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */ +finish_open: error = complete_walk(nd); if (error) { path_put(&save_parent); return error; } + audit_inode(name, nd->path.dentry, 0); error = -EISDIR; if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode)) goto out; error = -ENOTDIR; - if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup) + if ((nd->flags & LOOKUP_DIRECTORY) && !can_lookup(nd->inode)) goto out; - audit_inode(name, nd->path.dentry, 0); -finish_open: if (!S_ISREG(nd->inode->i_mode)) will_truncate = false; @@ -2920,6 +2901,67 @@ stale_open: goto retry_lookup; } +static int do_tmpfile(int dfd, struct filename *pathname, + struct nameidata *nd, int flags, + const struct open_flags *op, + struct file *file, int *opened) +{ + static const struct qstr name = QSTR_INIT("/", 1); + struct dentry *dentry, *child; + struct inode *dir; + int error = path_lookupat(dfd, pathname->name, + flags | LOOKUP_DIRECTORY, nd); + if (unlikely(error)) + return error; + error = mnt_want_write(nd->path.mnt); + if (unlikely(error)) + goto out; + /* we want directory to be writable */ + error = inode_permission(nd->inode, MAY_WRITE | MAY_EXEC); + if (error) + goto out2; + dentry = nd->path.dentry; + dir = dentry->d_inode; + if (!dir->i_op->tmpfile) { + error = -EOPNOTSUPP; + goto out2; + } + child = d_alloc(dentry, &name); + if (unlikely(!child)) { + error = -ENOMEM; + goto out2; + } + nd->flags &= ~LOOKUP_DIRECTORY; + nd->flags |= op->intent; + dput(nd->path.dentry); + nd->path.dentry = child; + error = dir->i_op->tmpfile(dir, nd->path.dentry, op->mode); + if (error) + goto out2; + audit_inode(pathname, nd->path.dentry, 0); + error = may_open(&nd->path, op->acc_mode, op->open_flag); + if (error) + goto out2; + file->f_path.mnt = nd->path.mnt; + error = finish_open(file, nd->path.dentry, NULL, opened); + if (error) + goto out2; + error = open_check_o_direct(file); + if (error) { + fput(file); + } else if (!(op->open_flag & O_EXCL)) { + struct inode *inode = file_inode(file); + spin_lock(&inode->i_lock); + inode->i_state |= I_LINKABLE; + spin_unlock(&inode->i_lock); + } +out2: + mnt_drop_write(nd->path.mnt); +out: + path_put(&nd->path); + return error; +} + static struct file *path_openat(int dfd, struct filename *pathname, struct nameidata *nd, const struct open_flags *op, int flags) { @@ -2935,6 +2977,11 @@ static struct file *path_openat(int dfd, struct filename *pathname, 
file->f_flags = op->open_flag; + if (unlikely(file->f_flags & O_TMPFILE)) { + error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened); + goto out; + } + error = path_init(dfd, pathname->name, flags | LOOKUP_PARENT, nd, &base); if (unlikely(error)) goto out; @@ -2987,9 +3034,10 @@ out: } struct file *do_filp_open(int dfd, struct filename *pathname, - const struct open_flags *op, int flags) + const struct open_flags *op) { struct nameidata nd; + int flags = op->lookup_flags; struct file *filp; filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_RCU); @@ -3001,17 +3049,16 @@ struct file *do_filp_open(int dfd, struct filename *pathname, } struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt, - const char *name, const struct open_flags *op, int flags) + const char *name, const struct open_flags *op) { struct nameidata nd; struct file *file; struct filename filename = { .name = name }; + int flags = op->lookup_flags | LOOKUP_ROOT; nd.root.mnt = mnt; nd.root.dentry = dentry; - flags |= LOOKUP_ROOT; - if (dentry->d_inode->i_op->follow_link && op->intent & LOOKUP_OPEN) return ERR_PTR(-ELOOP); @@ -3586,12 +3633,18 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de mutex_lock(&inode->i_mutex); /* Make sure we don't allow creating hardlink to an unlinked file */ - if (inode->i_nlink == 0) + if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE)) error = -ENOENT; else if (max_links && inode->i_nlink >= max_links) error = -EMLINK; else error = dir->i_op->link(old_dentry, dir, new_dentry); + + if (!error && (inode->i_state & I_LINKABLE)) { + spin_lock(&inode->i_lock); + inode->i_state &= ~I_LINKABLE; + spin_unlock(&inode->i_lock); + } mutex_unlock(&inode->i_mutex); if (!error) fsnotify_link(dir, inode, new_dentry); diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index 816326093656..3be047474bfc 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -23,12 +23,12 @@ #include "ncp_fs.h" -static void ncp_read_volume_list(struct file *, void *, filldir_t, +static void ncp_read_volume_list(struct file *, struct dir_context *, struct ncp_cache_control *); -static void ncp_do_readdir(struct file *, void *, filldir_t, +static void ncp_do_readdir(struct file *, struct dir_context *, struct ncp_cache_control *); -static int ncp_readdir(struct file *, void *, filldir_t); +static int ncp_readdir(struct file *, struct dir_context *); static int ncp_create(struct inode *, struct dentry *, umode_t, bool); static struct dentry *ncp_lookup(struct inode *, struct dentry *, unsigned int); @@ -49,7 +49,7 @@ const struct file_operations ncp_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = ncp_readdir, + .iterate = ncp_readdir, .unlocked_ioctl = ncp_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ncp_compat_ioctl, @@ -73,10 +73,8 @@ const struct inode_operations ncp_dir_inode_operations = * Dentry operations routines */ static int ncp_lookup_validate(struct dentry *, unsigned int); -static int ncp_hash_dentry(const struct dentry *, const struct inode *, - struct qstr *); -static int ncp_compare_dentry(const struct dentry *, const struct inode *, - const struct dentry *, const struct inode *, +static int ncp_hash_dentry(const struct dentry *, struct qstr *); +static int ncp_compare_dentry(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *); static int ncp_delete_dentry(const struct dentry *); @@ -119,11 +117,19 @@ static inline int ncp_case_sensitive(const struct inode *i) /* * Note: 
leave the hash unchanged if the directory * is case-sensitive. + * + * Accessing the parent inode can be racy under RCU pathwalking. + * Use ACCESS_ONCE() to make sure we use _one_ particular inode, + * the callers will handle races. */ static int -ncp_hash_dentry(const struct dentry *dentry, const struct inode *inode, - struct qstr *this) +ncp_hash_dentry(const struct dentry *dentry, struct qstr *this) { + struct inode *inode = ACCESS_ONCE(dentry->d_inode); + + if (!inode) + return 0; + if (!ncp_case_sensitive(inode)) { struct super_block *sb = dentry->d_sb; struct nls_table *t; @@ -140,14 +146,24 @@ ncp_hash_dentry(const struct dentry *dentry, const struct inode *inode, return 0; } +/* + * Accessing the parent inode can be racy under RCU pathwalking. + * Use ACCESS_ONCE() to make sure we use _one_ particular inode, + * the callers will handle races. + */ static int -ncp_compare_dentry(const struct dentry *parent, const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +ncp_compare_dentry(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { + struct inode *pinode; + if (len != name->len) return 1; + pinode = ACCESS_ONCE(parent->d_inode); + if (!pinode) + return 1; + if (ncp_case_sensitive(pinode)) return strncmp(str, name->name, len); @@ -424,9 +440,9 @@ static time_t ncp_obtain_mtime(struct dentry *dentry) return ncp_date_dos2unix(i.modifyTime, i.modifyDate); } -static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int ncp_readdir(struct file *file, struct dir_context *ctx) { - struct dentry *dentry = filp->f_path.dentry; + struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; struct page *page = NULL; struct ncp_server *server = NCP_SERVER(inode); @@ -440,7 +456,7 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir) DDPRINTK("ncp_readdir: reading %s/%s, pos=%d\n", dentry->d_parent->d_name.name, dentry->d_name.name, - (int) filp->f_pos); + (int) ctx->pos); result = -EIO; /* Do not generate '.' and '..' when server is dead. 
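The hunks around ncp_hash_dentry() and ncp_compare_dentry() reflect a parallel prototype change: d_hash() and d_compare() lose their inode arguments, so implementations that need the inode fetch dentry->d_inode themselves with ACCESS_ONCE() and must tolerate NULL under RCU walk. A hedged sketch of a case-folding d_hash() in the new shape, for a hypothetical filesystem (the hash helpers are the standard ones from linux/dcache.h, tolower() from linux/ctype.h):

    static int examplefs_d_hash(const struct dentry *dentry, struct qstr *q)
    {
            struct inode *inode = ACCESS_ONCE(dentry->d_inode);
            unsigned long hash;
            unsigned int i;

            if (!inode)             /* racing with unlink: keep hash as-is */
                    return 0;
            hash = init_name_hash();
            for (i = 0; i < q->len; i++)
                    hash = partial_name_hash(tolower(q->name[i]), hash);
            q->hash = end_name_hash(hash);
            return 0;
    }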
*/ @@ -448,16 +464,8 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir) goto out; result = 0; - if (filp->f_pos == 0) { - if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR)) - goto out; - filp->f_pos = 1; - } - if (filp->f_pos == 1) { - if (filldir(dirent, "..", 2, 1, parent_ino(dentry), DT_DIR)) - goto out; - filp->f_pos = 2; - } + if (!dir_emit_dots(file, ctx)) + goto out; page = grab_cache_page(&inode->i_data, 0); if (!page) @@ -469,7 +477,7 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir) if (!PageUptodate(page) || !ctl.head.eof) goto init_cache; - if (filp->f_pos == 2) { + if (ctx->pos == 2) { if (jiffies - ctl.head.time >= NCP_MAX_AGE(server)) goto init_cache; @@ -479,10 +487,10 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir) goto init_cache; } - if (filp->f_pos > ctl.head.end) + if (ctx->pos > ctl.head.end) goto finished; - ctl.fpos = filp->f_pos + (NCP_DIRCACHE_START - 2); + ctl.fpos = ctx->pos + (NCP_DIRCACHE_START - 2); ctl.ofs = ctl.fpos / NCP_DIRCACHE_SIZE; ctl.idx = ctl.fpos % NCP_DIRCACHE_SIZE; @@ -497,21 +505,21 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir) } while (ctl.idx < NCP_DIRCACHE_SIZE) { struct dentry *dent; - int res; + bool over; dent = ncp_dget_fpos(ctl.cache->dentry[ctl.idx], - dentry, filp->f_pos); + dentry, ctx->pos); if (!dent) goto invalid_cache; - res = filldir(dirent, dent->d_name.name, - dent->d_name.len, filp->f_pos, + over = !dir_emit(ctx, dent->d_name.name, + dent->d_name.len, dent->d_inode->i_ino, DT_UNKNOWN); dput(dent); - if (res) + if (over) goto finished; - filp->f_pos += 1; + ctx->pos += 1; ctl.idx += 1; - if (filp->f_pos > ctl.head.end) + if (ctx->pos > ctl.head.end) goto finished; } if (ctl.page) { @@ -548,9 +556,9 @@ init_cache: ctl.valid = 1; read_really: if (ncp_is_server_root(inode)) { - ncp_read_volume_list(filp, dirent, filldir, &ctl); + ncp_read_volume_list(file, ctx, &ctl); } else { - ncp_do_readdir(filp, dirent, filldir, &ctl); + ncp_do_readdir(file, ctx, &ctl); } ctl.head.end = ctl.fpos - 1; ctl.head.eof = ctl.valid; @@ -573,11 +581,11 @@ out: } static int -ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir, +ncp_fill_cache(struct file *file, struct dir_context *ctx, struct ncp_cache_control *ctrl, struct ncp_entry_info *entry, int inval_childs) { - struct dentry *newdent, *dentry = filp->f_path.dentry; + struct dentry *newdent, *dentry = file->f_path.dentry; struct inode *dir = dentry->d_inode; struct ncp_cache_control ctl = *ctrl; struct qstr qname; @@ -666,15 +674,13 @@ ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir, end_advance: if (!valid) ctl.valid = 0; - if (!ctl.filled && (ctl.fpos == filp->f_pos)) { - if (!ino) - ino = find_inode_number(dentry, &qname); + if (!ctl.filled && (ctl.fpos == ctx->pos)) { if (!ino) ino = iunique(dir->i_sb, 2); - ctl.filled = filldir(dirent, qname.name, qname.len, - filp->f_pos, ino, DT_UNKNOWN); + ctl.filled = !dir_emit(ctx, qname.name, qname.len, + ino, DT_UNKNOWN); if (!ctl.filled) - filp->f_pos += 1; + ctx->pos += 1; } ctl.fpos += 1; ctl.idx += 1; @@ -683,10 +689,10 @@ end_advance: } static void -ncp_read_volume_list(struct file *filp, void *dirent, filldir_t filldir, +ncp_read_volume_list(struct file *file, struct dir_context *ctx, struct ncp_cache_control *ctl) { - struct dentry *dentry = filp->f_path.dentry; + struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; struct ncp_server *server = NCP_SERVER(inode); 
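Worth flagging while reading these conversions: the return convention inverts. filldir() returned non-zero when the destination buffer was full, whereas dir_emit() returns false in that case, hence the recurring over = !dir_emit(...) shape. The pattern as a tiny helper (example_emit_one() is illustrative, not part of the patch):

    static bool example_emit_one(struct dir_context *ctx,
                                 const char *name, int len, u64 ino)
    {
            if (!dir_emit(ctx, name, len, ino, DT_UNKNOWN))
                    return false;   /* buffer full: stop, resume at this pos */
            ctx->pos += 1;          /* advance only after a successful emit */
            return true;
    }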
struct ncp_volume_info info; @@ -694,7 +700,7 @@ ncp_read_volume_list(struct file *filp, void *dirent, filldir_t filldir, int i; DPRINTK("ncp_read_volume_list: pos=%ld\n", - (unsigned long) filp->f_pos); + (unsigned long) ctx->pos); for (i = 0; i < NCP_NUMBER_OF_VOLUMES; i++) { int inval_dentry; @@ -715,16 +721,16 @@ ncp_read_volume_list(struct file *filp, void *dirent, filldir_t filldir, } inval_dentry = ncp_update_known_namespace(server, entry.i.volNumber, NULL); entry.volume = entry.i.volNumber; - if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry, inval_dentry)) + if (!ncp_fill_cache(file, ctx, ctl, &entry, inval_dentry)) return; } } static void -ncp_do_readdir(struct file *filp, void *dirent, filldir_t filldir, +ncp_do_readdir(struct file *file, struct dir_context *ctx, struct ncp_cache_control *ctl) { - struct dentry *dentry = filp->f_path.dentry; + struct dentry *dentry = file->f_path.dentry; struct inode *dir = dentry->d_inode; struct ncp_server *server = NCP_SERVER(dir); struct nw_search_sequence seq; @@ -736,7 +742,7 @@ ncp_do_readdir(struct file *filp, void *dirent, filldir_t filldir, DPRINTK("ncp_do_readdir: %s/%s, fpos=%ld\n", dentry->d_parent->d_name.name, dentry->d_name.name, - (unsigned long) filp->f_pos); + (unsigned long) ctx->pos); PPRINTK("ncp_do_readdir: init %s, volnum=%d, dirent=%u\n", dentry->d_name.name, NCP_FINFO(dir)->volNumber, NCP_FINFO(dir)->dirEntNum); @@ -778,7 +784,7 @@ ncp_do_readdir(struct file *filp, void *dirent, filldir_t filldir, rpl += onerpl; rpls -= onerpl; entry.volume = entry.i.volNumber; - if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry, 0)) + if (!ncp_fill_cache(file, ctx, ctl, &entry, 0)) break; } } while (more); @@ -1029,15 +1035,6 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry) DPRINTK("ncp_rmdir: removing %s/%s\n", dentry->d_parent->d_name.name, dentry->d_name.name); - /* - * fail with EBUSY if there are still references to this - * directory. - */ - dentry_unhash(dentry); - error = -EBUSY; - if (!d_unhashed(dentry)) - goto out; - len = sizeof(__name); error = ncp_io2vol(server, __name, &len, dentry->d_name.name, dentry->d_name.len, !ncp_preserve_case(dir)); @@ -1140,17 +1137,6 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry, old_dentry->d_parent->d_name.name, old_dentry->d_name.name, new_dentry->d_parent->d_name.name, new_dentry->d_name.name); - if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) { - /* - * fail with EBUSY if there are still references to this - * directory. - */ - dentry_unhash(new_dentry); - error = -EBUSY; - if (!d_unhashed(new_dentry)) - goto out; - } - ncp_age_dentry(server, old_dentry); ncp_age_dentry(server, new_dentry); diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 26910c8154da..0765ad12c382 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -891,6 +891,10 @@ int ncp_notify_change(struct dentry *dentry, struct iattr *attr) if (!server) /* How this could happen? */ goto out; + result = -EPERM; + if (IS_DEADDIR(dentry->d_inode)) + goto out; + /* ageing the dentry to force validation */ ncp_age_dentry(server, dentry); diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c index ee24df5af1f9..3c5dd55d284c 100644 --- a/fs/ncpfs/mmap.c +++ b/fs/ncpfs/mmap.c @@ -117,7 +117,7 @@ int ncp_mmap(struct file *file, struct vm_area_struct *vma) return -EINVAL; /* we do not support files bigger than 4GB... We eventually supports just 4GB... 
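The ncpfs mmap hunk below swaps an open-coded size calculation for vma_pages(). For reference, the helper in include/linux/mm.h is exactly the removed expression:

    static inline unsigned long vma_pages(struct vm_area_struct *vma)
    {
            return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    }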
*/ - if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff + if (vma_pages(vma) + vma->vm_pgoff > (1U << (32 - PAGE_SHIFT))) return -EFBIG; diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index cff089a412c7..da6a43d19aa3 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -211,7 +211,6 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt, struct svc_rqst *rqstp; int (*callback_svc)(void *vrqstp); struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; - char svc_name[12]; int ret; nfs_callback_bc_serv(minorversion, xprt, serv); @@ -235,10 +234,10 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt, svc_sock_update_bufs(serv); - sprintf(svc_name, "nfsv4.%u-svc", minorversion); cb_info->serv = serv; cb_info->rqst = rqstp; - cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name); + cb_info->task = kthread_run(callback_svc, cb_info->rqst, + "nfsv4.%u-svc", minorversion); if (IS_ERR(cb_info->task)) { ret = PTR_ERR(cb_info->task); svc_exit_thread(cb_info->rqst); diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 57db3244f4d9..7ec4814e298d 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -73,20 +73,20 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_ if (inode->i_flock == NULL) goto out; - /* Protect inode->i_flock using the file locks lock */ - lock_flocks(); + /* Protect inode->i_flock using the i_lock */ + spin_lock(&inode->i_lock); for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK))) continue; if (nfs_file_open_context(fl->fl_file) != ctx) continue; - unlock_flocks(); + spin_unlock(&inode->i_lock); status = nfs4_lock_delegation_recall(fl, state, stateid); if (status < 0) goto out; - lock_flocks(); + spin_lock(&inode->i_lock); } - unlock_flocks(); + spin_unlock(&inode->i_lock); out: return status; } diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index e093e73178b7..d7ed697133f0 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -33,6 +33,7 @@ #include <linux/pagevec.h> #include <linux/namei.h> #include <linux/mount.h> +#include <linux/swap.h> #include <linux/sched.h> #include <linux/kmemleak.h> #include <linux/xattr.h> @@ -46,7 +47,7 @@ static int nfs_opendir(struct inode *, struct file *); static int nfs_closedir(struct inode *, struct file *); -static int nfs_readdir(struct file *, void *, filldir_t); +static int nfs_readdir(struct file *, struct dir_context *); static int nfs_fsync_dir(struct file *, loff_t, loff_t, int); static loff_t nfs_llseek_dir(struct file *, loff_t, int); static void nfs_readdir_clear_array(struct page*); @@ -54,7 +55,7 @@ static void nfs_readdir_clear_array(struct page*); const struct file_operations nfs_dir_operations = { .llseek = nfs_llseek_dir, .read = generic_read_dir, - .readdir = nfs_readdir, + .iterate = nfs_readdir, .open = nfs_opendir, .release = nfs_closedir, .fsync = nfs_fsync_dir, @@ -147,6 +148,7 @@ typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, int); typedef struct { struct file *file; struct page *page; + struct dir_context *ctx; unsigned long page_index; u64 *dir_cookie; u64 last_cookie; @@ -252,7 +254,7 @@ out: static int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc) { - loff_t diff = desc->file->f_pos - desc->current_index; + loff_t diff = desc->ctx->pos - desc->current_index; unsigned int index; if (diff < 0) @@ -289,7 +291,7 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array 
*array, nfs_readdir_des || (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))) { ctx->duped = 0; ctx->attr_gencount = nfsi->attr_gencount; - } else if (new_pos < desc->file->f_pos) { + } else if (new_pos < desc->ctx->pos) { if (ctx->duped > 0 && ctx->dup_cookie == *desc->dir_cookie) { if (printk_ratelimit()) { @@ -307,7 +309,7 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des ctx->dup_cookie = *desc->dir_cookie; ctx->duped = -1; } - desc->file->f_pos = new_pos; + desc->ctx->pos = new_pos; desc->cache_entry_index = i; return 0; } @@ -405,13 +407,13 @@ different: } static -bool nfs_use_readdirplus(struct inode *dir, struct file *filp) +bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx) { if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS)) return false; if (test_and_clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags)) return true; - if (filp->f_pos == 0) + if (ctx->pos == 0) return true; return false; } @@ -702,8 +704,7 @@ int readdir_search_pagecache(nfs_readdir_descriptor_t *desc) * Once we've found the start of the dirent within a page: fill 'er up... */ static -int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent, - filldir_t filldir) +int nfs_do_filldir(nfs_readdir_descriptor_t *desc) { struct file *file = desc->file; int i = 0; @@ -721,13 +722,12 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent, struct nfs_cache_array_entry *ent; ent = &array->array[i]; - if (filldir(dirent, ent->string.name, ent->string.len, - file->f_pos, nfs_compat_user_ino64(ent->ino), - ent->d_type) < 0) { + if (!dir_emit(desc->ctx, ent->string.name, ent->string.len, + nfs_compat_user_ino64(ent->ino), ent->d_type)) { desc->eof = 1; break; } - file->f_pos++; + desc->ctx->pos++; if (i < (array->size-1)) *desc->dir_cookie = array->array[i+1].cookie; else @@ -759,8 +759,7 @@ out: * directory in the page cache by the time we get here. */ static inline -int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent, - filldir_t filldir) +int uncached_readdir(nfs_readdir_descriptor_t *desc) { struct page *page = NULL; int status; @@ -785,7 +784,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent, if (status < 0) goto out_release; - status = nfs_do_filldir(desc, dirent, filldir); + status = nfs_do_filldir(desc); out: dfprintk(DIRCACHE, "NFS: %s: returns %d\n", @@ -800,35 +799,36 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent, last cookie cache takes care of the common case of reading the whole directory. */ -static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int nfs_readdir(struct file *file, struct dir_context *ctx) { - struct dentry *dentry = filp->f_path.dentry; + struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; nfs_readdir_descriptor_t my_desc, *desc = &my_desc; - struct nfs_open_dir_context *dir_ctx = filp->private_data; + struct nfs_open_dir_context *dir_ctx = file->private_data; int res; dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n", dentry->d_parent->d_name.name, dentry->d_name.name, - (long long)filp->f_pos); + (long long)ctx->pos); nfs_inc_stats(inode, NFSIOS_VFSGETDENTS); /* - * filp->f_pos points to the dirent entry number. + * ctx->pos points to the dirent entry number. * *desc->dir_cookie has the cookie for the next entry. We have * to either find the entry with the appropriate number or * revalidate the cookie. 
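In the NFS conversion the readdir descriptor gains a struct dir_context pointer, so the helpers advance desc->ctx->pos instead of file->f_pos and remember the server-side cookie to resume from. A sketch of the emission loop that nfs_do_filldir() performs, with invented types (example_ent, example_emit_array) standing in for the real cache-array structures:

    struct example_ent {
            const char *name;
            unsigned int len;
            u64 ino;
            u64 cookie;
    };

    static void example_emit_array(struct dir_context *ctx,
                                   const struct example_ent *a, int n,
                                   u64 *resume_cookie)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (!dir_emit(ctx, a[i].name, a[i].len,
                                  a[i].ino, DT_UNKNOWN))
                            break;          /* full: resume from the cookie */
                    ctx->pos++;
                    if (i + 1 < n)
                            *resume_cookie = a[i + 1].cookie;
            }
    }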
*/ memset(desc, 0, sizeof(*desc)); - desc->file = filp; + desc->file = file; + desc->ctx = ctx; desc->dir_cookie = &dir_ctx->dir_cookie; desc->decode = NFS_PROTO(inode)->decode_dirent; - desc->plus = nfs_use_readdirplus(inode, filp) ? 1 : 0; + desc->plus = nfs_use_readdirplus(inode, ctx) ? 1 : 0; nfs_block_sillyrename(dentry); - res = nfs_revalidate_mapping(inode, filp->f_mapping); + res = nfs_revalidate_mapping(inode, file->f_mapping); if (res < 0) goto out; @@ -840,7 +840,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir) /* This means either end of directory */ if (*desc->dir_cookie && desc->eof == 0) { /* Or that the server has 'lost' a cookie */ - res = uncached_readdir(desc, dirent, filldir); + res = uncached_readdir(desc); if (res == 0) continue; } @@ -857,7 +857,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (res < 0) break; - res = nfs_do_filldir(desc, dirent, filldir); + res = nfs_do_filldir(desc); if (res < 0) break; } while (!desc->eof); @@ -1759,7 +1759,6 @@ EXPORT_SYMBOL_GPL(nfs_unlink); */ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { - struct pagevec lru_pvec; struct page *page; char *kaddr; struct iattr attr; @@ -1799,11 +1798,8 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) * No big deal if we can't add this page to the page cache here. * READLINK will get the missing page from the server if needed. */ - pagevec_init(&lru_pvec, 0); - if (!add_to_page_cache(page, dentry->d_inode->i_mapping, 0, + if (!add_to_page_cache_lru(page, dentry->d_inode->i_mapping, 0, GFP_KERNEL)) { - pagevec_add(&lru_pvec, page); - pagevec_lru_add_file(&lru_pvec); SetPageUptodate(page); unlock_page(page); } else diff --git a/fs/nfs/file.c b/fs/nfs/file.c index a87a44f84113..94e94bd11aae 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -451,11 +451,13 @@ static int nfs_write_end(struct file *file, struct address_space *mapping, * - Called if either PG_private or PG_fscache is set on the page * - Caller holds page lock */ -static void nfs_invalidate_page(struct page *page, unsigned long offset) +static void nfs_invalidate_page(struct page *page, unsigned int offset, + unsigned int length) { - dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %lu)\n", page, offset); + dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n", + page, offset, length); - if (offset != 0) + if (offset != 0 || length < PAGE_CACHE_SIZE) return; /* Cancel any unstarted writes on this page */ nfs_wb_page_cancel(page_file_mapping(page)->host, page); @@ -493,6 +495,35 @@ static int nfs_release_page(struct page *page, gfp_t gfp) return nfs_fscache_release_page(page, gfp); } +static void nfs_check_dirty_writeback(struct page *page, + bool *dirty, bool *writeback) +{ + struct nfs_inode *nfsi; + struct address_space *mapping = page_file_mapping(page); + + if (!mapping || PageSwapCache(page)) + return; + + /* + * Check if an unstable page is currently being committed and + * if so, have the VM treat it as if the page is under writeback + * so it will not block due to pages that will shortly be freeable. 
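The nfs_check_dirty_writeback() addition below introduces the new ->is_dirty_writeback address_space operation: reclaim asks the filesystem how a page should be treated rather than inferring state from page flags alone. A minimal sketch of the hook for a hypothetical filesystem, mirroring only the PagePrivate branch of the NFS logic:

    static void examplefs_dirty_writeback(struct page *page,
                                          bool *dirty, bool *writeback)
    {
            if (PagePrivate(page))
                    *dirty = true;  /* private state pending, not freeable yet */
    }

    static const struct address_space_operations examplefs_aops = {
            .is_dirty_writeback = examplefs_dirty_writeback,
            /* ... remaining methods elided ... */
    };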
+ */ + nfsi = NFS_I(mapping->host); + if (test_bit(NFS_INO_COMMIT, &nfsi->flags)) { + *writeback = true; + return; + } + + /* + * If PagePrivate() is set, then the page is not freeable and as the + * inode is not being committed, it's not going to be cleaned in the + * near future so treat it as dirty + */ + if (PagePrivate(page)) + *dirty = true; +} + /* * Attempt to clear the private state associated with a page when an error * occurs that requires the cached contents of an inode to be written back or @@ -540,6 +571,7 @@ const struct address_space_operations nfs_file_aops = { .direct_IO = nfs_direct_IO, .migratepage = nfs_migrate_page, .launder_page = nfs_launder_page, + .is_dirty_writeback = nfs_check_dirty_writeback, .error_remove_page = generic_error_remove_page, #ifdef CONFIG_NFS_SWAP .swap_activate = nfs_swap_activate, diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index c1c7a9d78722..ce727047ee87 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -79,7 +79,7 @@ int nfs_wait_bit_killable(void *word) { if (fatal_signal_pending(current)) return -ERESTARTSYS; - freezable_schedule(); + freezable_schedule_unsafe(); return 0; } EXPORT_SYMBOL_GPL(nfs_wait_bit_killable); diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 43ea96ced28c..ce90eb4775c2 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -33,7 +33,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) res = rpc_call_sync(clnt, msg, flags); if (res != -EJUKEBOX) break; - freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME); + freezable_schedule_timeout_killable_unsafe(NFS_JUKEBOX_RETRY_TIME); res = -ERESTARTSYS; } while (!fatal_signal_pending(current)); return res; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d7ba5616989c..28241a42f363 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -268,7 +268,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) *timeout = NFS4_POLL_RETRY_MIN; if (*timeout > NFS4_POLL_RETRY_MAX) *timeout = NFS4_POLL_RETRY_MAX; - freezable_schedule_timeout_killable(*timeout); + freezable_schedule_timeout_killable_unsafe(*timeout); if (fatal_signal_pending(current)) res = -ERESTARTSYS; *timeout <<= 1; @@ -4528,7 +4528,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4 static unsigned long nfs4_set_lock_task_retry(unsigned long timeout) { - freezable_schedule_timeout_killable(timeout); + freezable_schedule_timeout_killable_unsafe(timeout); timeout <<= 1; if (timeout > NFS4_LOCK_MAXTIMEOUT) return NFS4_LOCK_MAXTIMEOUT; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 1fab140764c4..55418811a55a 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1194,7 +1194,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) snprintf(buf, sizeof(buf), "%s-manager", rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); rcu_read_unlock(); - task = kthread_run(nfs4_run_state_manager, clp, buf); + task = kthread_run(nfs4_run_state_manager, clp, "%s", buf); if (IS_ERR(task)) { printk(KERN_ERR "%s: kthread_run: %ld\n", __func__, PTR_ERR(task)); @@ -1373,13 +1373,13 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_ /* Guard against delegation returns and new lock/unlock calls */ down_write(&nfsi->rwsem); /* Protect inode->i_flock using the BKL */ - lock_flocks(); + spin_lock(&inode->i_lock); for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK))) continue; if (nfs_file_open_context(fl->fl_file)->state != state) continue; 
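The surrounding nfs4_reclaim_locks() hunk shows the cost of the new spinlock: since i_flock is now under i_lock, the loop must drop the lock around anything that can sleep (here the recovery RPC) and retake it before continuing the walk. The shape of that pattern, with interesting() and do_blocking_work() as stand-ins for the real predicate and call:

    spin_lock(&inode->i_lock);
    for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
            if (!interesting(fl))
                    continue;
            spin_unlock(&inode->i_lock);
            do_blocking_work(fl);           /* may sleep */
            spin_lock(&inode->i_lock);
    }
    spin_unlock(&inode->i_lock);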
- unlock_flocks(); + spin_unlock(&inode->i_lock); status = ops->recover_lock(state, fl); switch (status) { case 0: @@ -1406,9 +1406,9 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_ /* kill_proc(fl->fl_pid, SIGLOST, 1); */ status = 0; } - lock_flocks(); + spin_lock(&inode->i_lock); } - unlock_flocks(); + spin_unlock(&inode->i_lock); out: up_write(&nfsi->rwsem); return status; diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 4e9a21db867a..105a3b080d12 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -240,11 +240,16 @@ struct name_list { struct list_head list; }; +struct nfs4_dir_ctx { + struct dir_context ctx; + struct list_head names; +}; + static int nfsd4_build_namelist(void *arg, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { - struct list_head *names = arg; + struct nfs4_dir_ctx *ctx = arg; struct name_list *entry; if (namlen != HEXDIR_LEN - 1) @@ -254,7 +259,7 @@ nfsd4_build_namelist(void *arg, const char *name, int namlen, return -ENOMEM; memcpy(entry->name, name, HEXDIR_LEN - 1); entry->name[HEXDIR_LEN - 1] = '\0'; - list_add(&entry->list, names); + list_add(&entry->list, &ctx->names); return 0; } @@ -263,7 +268,10 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn) { const struct cred *original_cred; struct dentry *dir = nn->rec_file->f_path.dentry; - LIST_HEAD(names); + struct nfs4_dir_ctx ctx = { + .ctx.actor = nfsd4_build_namelist, + .names = LIST_HEAD_INIT(ctx.names) + }; int status; status = nfs4_save_creds(&original_cred); @@ -276,11 +284,11 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn) return status; } - status = vfs_readdir(nn->rec_file, nfsd4_build_namelist, &names); + status = iterate_dir(nn->rec_file, &ctx.ctx); mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); - while (!list_empty(&names)) { + while (!list_empty(&ctx.names)) { struct name_list *entry; - entry = list_entry(names.next, struct name_list, list); + entry = list_entry(ctx.names.next, struct name_list, list); if (!status) { struct dentry *dentry; dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 316ec843dec2..f17051838b41 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2645,13 +2645,13 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp) list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); - /* only place dl_time is set. protected by lock_flocks*/ + /* Only place dl_time is set; protected by i_lock: */ dp->dl_time = get_seconds(); nfsd4_cb_recall(dp); } -/* Called from break_lease() with lock_flocks() held. */ +/* Called from break_lease() with i_lock held. 
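The nfsd4_list_rec_dir() change above demonstrates the idiom for kernel-internal directory scans now that vfs_readdir() is gone: embed struct dir_context as the first member of a private struct, point .actor at a filldir-style callback, and pass the embedded member to iterate_dir(); the callback's opaque argument is then the containing struct. A self-contained sketch under those assumptions (all names invented):

    struct example_dir_ctx {
            struct dir_context ctx;
            int count;
    };

    static int example_count_entry(void *arg, const char *name, int namlen,
                                   loff_t offset, u64 ino, unsigned int d_type)
    {
            struct example_dir_ctx *edc = arg;

            edc->count++;
            return 0;
    }

    static int example_count_dir(struct file *dir)
    {
            struct example_dir_ctx edc = { .ctx.actor = example_count_entry };
            int err = iterate_dir(dir, &edc.ctx);

            return err ? err : edc.count;
    }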
*/ static void nfsd_break_deleg_cb(struct file_lock *fl) { struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner; @@ -4520,7 +4520,7 @@ check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner) struct inode *inode = filp->fi_inode; int status = 0; - lock_flocks(); + spin_lock(&inode->i_lock); for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { if ((*flpp)->fl_owner == (fl_owner_t)lowner) { status = 1; @@ -4528,7 +4528,7 @@ check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner) } } out: - unlock_flocks(); + spin_unlock(&inode->i_lock); return status; } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 84ce601d8063..a6bc8a7423db 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1912,6 +1912,7 @@ struct buffered_dirent { }; struct readdir_data { + struct dir_context ctx; char *dirent; size_t used; int full; @@ -1943,13 +1944,15 @@ static int nfsd_buffered_filldir(void *__buf, const char *name, int namlen, static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func, struct readdir_cd *cdp, loff_t *offsetp) { - struct readdir_data buf; struct buffered_dirent *de; int host_err; int size; loff_t offset; + struct readdir_data buf = { + .ctx.actor = nfsd_buffered_filldir, + .dirent = (void *)__get_free_page(GFP_KERNEL) + }; - buf.dirent = (void *)__get_free_page(GFP_KERNEL); if (!buf.dirent) return nfserrno(-ENOMEM); @@ -1963,7 +1966,7 @@ static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func, buf.used = 0; buf.full = 0; - host_err = vfs_readdir(file, nfsd_buffered_filldir, &buf); + host_err = iterate_dir(file, &buf.ctx); if (buf.full) host_err = 0; diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index eed4d7b26249..741fd02e0444 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c @@ -398,6 +398,69 @@ nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode, } /** + * nilfs_palloc_count_desc_blocks - count descriptor blocks number + * @inode: inode of metadata file using this allocator + * @desc_blocks: descriptor blocks number [out] + */ +static int nilfs_palloc_count_desc_blocks(struct inode *inode, + unsigned long *desc_blocks) +{ + unsigned long blknum; + int ret; + + ret = nilfs_bmap_last_key(NILFS_I(inode)->i_bmap, &blknum); + if (likely(!ret)) + *desc_blocks = DIV_ROUND_UP( + blknum, NILFS_MDT(inode)->mi_blocks_per_desc_block); + return ret; +} + +/** + * nilfs_palloc_mdt_file_can_grow - check potential opportunity for + * MDT file growing + * @inode: inode of metadata file using this allocator + * @desc_blocks: known current descriptor blocks count + */ +static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode, + unsigned long desc_blocks) +{ + return (nilfs_palloc_groups_per_desc_block(inode) * desc_blocks) < + nilfs_palloc_groups_count(inode); +} + +/** + * nilfs_palloc_count_max_entries - count max number of entries that can be + * described by descriptor blocks count + * @inode: inode of metadata file using this allocator + * @nused: current number of used entries + * @nmaxp: max number of entries [out] + */ +int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp) +{ + unsigned long desc_blocks = 0; + u64 entries_per_desc_block, nmax; + int err; + + err = nilfs_palloc_count_desc_blocks(inode, &desc_blocks); + if (unlikely(err)) + return err; + + entries_per_desc_block = (u64)nilfs_palloc_entries_per_group(inode) * + nilfs_palloc_groups_per_desc_block(inode); + nmax = entries_per_desc_block * desc_blocks; + + if (nused == nmax && + 
nilfs_palloc_mdt_file_can_grow(inode, desc_blocks)) + nmax += entries_per_desc_block; + + if (nused > nmax) + return -ERANGE; + + *nmaxp = nmax; + return 0; +} + +/** * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object * @inode: inode of metadata file using this allocator * @req: nilfs_palloc_req structure exchanged for the allocation diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h index fb7238100548..4bd6451b5703 100644 --- a/fs/nilfs2/alloc.h +++ b/fs/nilfs2/alloc.h @@ -48,6 +48,8 @@ int nilfs_palloc_get_entry_block(struct inode *, __u64, int, void *nilfs_palloc_block_get_entry(const struct inode *, __u64, const struct buffer_head *, void *); +int nilfs_palloc_count_max_entries(struct inode *, u64, u64 *); + /** * nilfs_palloc_req - persistent allocator request and reply * @pr_entry_nr: entry number (vblocknr or inode number) diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index f30b017740a7..197a63e9d102 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -256,22 +256,18 @@ static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode) de->file_type = nilfs_type_by_mode[(mode & S_IFMT)>>S_SHIFT]; } -static int nilfs_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int nilfs_readdir(struct file *file, struct dir_context *ctx) { - loff_t pos = filp->f_pos; - struct inode *inode = file_inode(filp); + loff_t pos = ctx->pos; + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; unsigned int offset = pos & ~PAGE_CACHE_MASK; unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned long npages = dir_pages(inode); /* unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */ - unsigned char *types = NULL; - int ret; if (pos > inode->i_size - NILFS_DIR_REC_LEN(1)) - goto success; - - types = nilfs_filetype_table; + return 0; for ( ; n < npages; n++, offset = 0) { char *kaddr, *limit; @@ -281,9 +277,8 @@ static int nilfs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (IS_ERR(page)) { nilfs_error(sb, __func__, "bad page in #%lu", inode->i_ino); - filp->f_pos += PAGE_CACHE_SIZE - offset; - ret = -EIO; - goto done; + ctx->pos += PAGE_CACHE_SIZE - offset; + return -EIO; } kaddr = page_address(page); de = (struct nilfs_dir_entry *)(kaddr + offset); @@ -293,35 +288,28 @@ static int nilfs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (de->rec_len == 0) { nilfs_error(sb, __func__, "zero-length directory entry"); - ret = -EIO; nilfs_put_page(page); - goto done; + return -EIO; } if (de->inode) { - int over; - unsigned char d_type = DT_UNKNOWN; + unsigned char t; - if (types && de->file_type < NILFS_FT_MAX) - d_type = types[de->file_type]; + if (de->file_type < NILFS_FT_MAX) + t = nilfs_filetype_table[de->file_type]; + else + t = DT_UNKNOWN; - offset = (char *)de - kaddr; - over = filldir(dirent, de->name, de->name_len, - (n<<PAGE_CACHE_SHIFT) | offset, - le64_to_cpu(de->inode), d_type); - if (over) { + if (!dir_emit(ctx, de->name, de->name_len, + le64_to_cpu(de->inode), t)) { nilfs_put_page(page); - goto success; + return 0; } } - filp->f_pos += nilfs_rec_len_from_disk(de->rec_len); + ctx->pos += nilfs_rec_len_from_disk(de->rec_len); } nilfs_put_page(page); } - -success: - ret = 0; -done: - return ret; + return 0; } /* @@ -678,7 +666,7 @@ not_empty: const struct file_operations nilfs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = nilfs_readdir, + .iterate = nilfs_readdir, .unlocked_ioctl = nilfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = 
nilfs_compat_ioctl, diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c index d8e65bde083c..6548c7851b48 100644 --- a/fs/nilfs2/ifile.c +++ b/fs/nilfs2/ifile.c @@ -160,6 +160,28 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino, } /** + * nilfs_ifile_count_free_inodes - calculate free inodes count + * @ifile: ifile inode + * @nmaxinodes: current maximum of available inodes count [out] + * @nfreeinodes: free inodes count [out] + */ +int nilfs_ifile_count_free_inodes(struct inode *ifile, + u64 *nmaxinodes, u64 *nfreeinodes) +{ + u64 nused; + int err; + + *nmaxinodes = 0; + *nfreeinodes = 0; + + nused = atomic64_read(&NILFS_I(ifile)->i_root->inodes_count); + err = nilfs_palloc_count_max_entries(ifile, nused, nmaxinodes); + if (likely(!err)) + *nfreeinodes = *nmaxinodes - nused; + return err; +} + +/** * nilfs_ifile_read - read or get ifile inode * @sb: super block instance * @root: root object diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h index 59b6f2b51df6..679674d13372 100644 --- a/fs/nilfs2/ifile.h +++ b/fs/nilfs2/ifile.h @@ -49,6 +49,8 @@ int nilfs_ifile_create_inode(struct inode *, ino_t *, struct buffer_head **); int nilfs_ifile_delete_inode(struct inode *, ino_t); int nilfs_ifile_get_inode_block(struct inode *, ino_t, struct buffer_head **); +int nilfs_ifile_count_free_inodes(struct inode *, u64 *, u64 *); + int nilfs_ifile_read(struct super_block *sb, struct nilfs_root *root, size_t inode_size, struct nilfs_inode *raw_inode, struct inode **inodep); diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index bccfec8343c5..b1a5277cfd18 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -54,7 +54,7 @@ void nilfs_inode_add_blocks(struct inode *inode, int n) inode_add_bytes(inode, (1 << inode->i_blkbits) * n); if (root) - atomic_add(n, &root->blocks_count); + atomic64_add(n, &root->blocks_count); } void nilfs_inode_sub_blocks(struct inode *inode, int n) @@ -63,7 +63,7 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n) inode_sub_bytes(inode, (1 << inode->i_blkbits) * n); if (root) - atomic_sub(n, &root->blocks_count); + atomic64_sub(n, &root->blocks_count); } /** @@ -369,7 +369,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) goto failed_ifile_create_inode; /* reference count of i_bh inherits from nilfs_mdt_read_block() */ - atomic_inc(&root->inodes_count); + atomic64_inc(&root->inodes_count); inode_init_owner(inode, dir, mode); inode->i_ino = ino; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; @@ -801,7 +801,7 @@ void nilfs_evict_inode(struct inode *inode) ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino); if (!ret) - atomic_dec(&ii->i_root->inodes_count); + atomic64_dec(&ii->i_root->inodes_count); nilfs_clear_inode(inode); diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index a5752a589932..bd88a7461063 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -835,9 +835,9 @@ static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci) raw_cp->cp_snapshot_list.ssl_next = 0; raw_cp->cp_snapshot_list.ssl_prev = 0; raw_cp->cp_inodes_count = - cpu_to_le64(atomic_read(&sci->sc_root->inodes_count)); + cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count)); raw_cp->cp_blocks_count = - cpu_to_le64(atomic_read(&sci->sc_root->blocks_count)); + cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count)); raw_cp->cp_nblk_inc = cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc); raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime); diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 
c7d1f9f18b09..1427de5ebf4d 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -554,8 +554,10 @@ int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt, if (err) goto failed_bh; - atomic_set(&root->inodes_count, le64_to_cpu(raw_cp->cp_inodes_count)); - atomic_set(&root->blocks_count, le64_to_cpu(raw_cp->cp_blocks_count)); + atomic64_set(&root->inodes_count, + le64_to_cpu(raw_cp->cp_inodes_count)); + atomic64_set(&root->blocks_count, + le64_to_cpu(raw_cp->cp_blocks_count)); nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp); @@ -609,6 +611,7 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf) unsigned long overhead; unsigned long nrsvblocks; sector_t nfreeblocks; + u64 nmaxinodes, nfreeinodes; int err; /* @@ -633,14 +636,34 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf) if (unlikely(err)) return err; + err = nilfs_ifile_count_free_inodes(root->ifile, + &nmaxinodes, &nfreeinodes); + if (unlikely(err)) { + printk(KERN_WARNING + "NILFS warning: fail to count free inodes: err %d.\n", + err); + if (err == -ERANGE) { + /* + * If nilfs_palloc_count_max_entries() returns + * -ERANGE error code then we simply treat + * curent inodes count as maximum possible and + * zero as free inodes value. + */ + nmaxinodes = atomic64_read(&root->inodes_count); + nfreeinodes = 0; + err = 0; + } else + return err; + } + buf->f_type = NILFS_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = blocks - overhead; buf->f_bfree = nfreeblocks; buf->f_bavail = (buf->f_bfree >= nrsvblocks) ? (buf->f_bfree - nrsvblocks) : 0; - buf->f_files = atomic_read(&root->inodes_count); - buf->f_ffree = 0; /* nilfs_count_free_inodes(sb); */ + buf->f_files = nmaxinodes; + buf->f_ffree = nfreeinodes; buf->f_namelen = NILFS_NAME_LEN; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 41e6a04a561f..94c451ce6d24 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -764,8 +764,8 @@ nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 cno) new->ifile = NULL; new->nilfs = nilfs; atomic_set(&new->count, 1); - atomic_set(&new->inodes_count, 0); - atomic_set(&new->blocks_count, 0); + atomic64_set(&new->inodes_count, 0); + atomic64_set(&new->blocks_count, 0); rb_link_node(&new->rb_node, parent, p); rb_insert_color(&new->rb_node, &nilfs->ns_cptree); diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index be1267a34cea..de8cc53b4a5c 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h @@ -241,8 +241,8 @@ struct nilfs_root { struct the_nilfs *nilfs; struct inode *ifile; - atomic_t inodes_count; - atomic_t blocks_count; + atomic64_t inodes_count; + atomic64_t blocks_count; }; /* Special checkpoint number */ diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 6c80083a984f..1ea52f7c031f 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -399,9 +399,6 @@ static int fanotify_release(struct inode *ignored, struct file *file) wake_up(&group->fanotify_data.access_waitq); #endif - if (file->f_flags & FASYNC) - fsnotify_fasync(-1, file, 0); - /* matches the fanotify_init->fsnotify_alloc_group */ fsnotify_destroy_group(group); diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index fa9c05f97af4..d267ea6aa1a0 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -1372,7 +1372,7 @@ retry_writepage: * The page may have dirty, unmapped buffers. 
Make them * freeable here, so the page does not leak. */ - block_invalidatepage(page, 0); + block_invalidatepage(page, 0, PAGE_CACHE_SIZE); unlock_page(page); ntfs_debug("Write outside i_size - truncated?"); return 0; diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c index aa411c3f20e9..9e38dafa3bc7 100644 --- a/fs/ntfs/dir.c +++ b/fs/ntfs/dir.c @@ -1004,13 +1004,11 @@ dir_err_out: /** * ntfs_filldir - ntfs specific filldir method * @vol: current ntfs volume - * @fpos: position in the directory * @ndir: ntfs inode of current directory * @ia_page: page in which the index allocation buffer @ie is in resides * @ie: current index entry * @name: buffer to use for the converted name - * @dirent: vfs filldir callback context - * @filldir: vfs filldir callback + * @actor: what to feed the entries to * * Convert the Unicode @name to the loaded NLS and pass it to the @filldir * callback. @@ -1024,12 +1022,12 @@ dir_err_out: * retake the lock if we are returning a non-zero value as ntfs_readdir() * would need to drop the lock immediately anyway. */ -static inline int ntfs_filldir(ntfs_volume *vol, loff_t fpos, +static inline int ntfs_filldir(ntfs_volume *vol, ntfs_inode *ndir, struct page *ia_page, INDEX_ENTRY *ie, - u8 *name, void *dirent, filldir_t filldir) + u8 *name, struct dir_context *actor) { unsigned long mref; - int name_len, rc; + int name_len; unsigned dt_type; FILE_NAME_TYPE_FLAGS name_type; @@ -1068,13 +1066,14 @@ static inline int ntfs_filldir(ntfs_volume *vol, loff_t fpos, if (ia_page) unlock_page(ia_page); ntfs_debug("Calling filldir for %s with len %i, fpos 0x%llx, inode " - "0x%lx, DT_%s.", name, name_len, fpos, mref, + "0x%lx, DT_%s.", name, name_len, actor->pos, mref, dt_type == DT_DIR ? "DIR" : "REG"); - rc = filldir(dirent, name, name_len, fpos, mref, dt_type); + if (!dir_emit(actor, name, name_len, mref, dt_type)) + return 1; /* Relock the page but not if we are aborting ->readdir. */ - if (!rc && ia_page) + if (ia_page) lock_page(ia_page); - return rc; + return 0; } /* @@ -1097,11 +1096,11 @@ static inline int ntfs_filldir(ntfs_volume *vol, loff_t fpos, * removes them again after the write is complete after which it * unlocks the page. */ -static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int ntfs_readdir(struct file *file, struct dir_context *actor) { s64 ia_pos, ia_start, prev_ia_pos, bmp_pos; - loff_t fpos, i_size; - struct inode *bmp_vi, *vdir = file_inode(filp); + loff_t i_size; + struct inode *bmp_vi, *vdir = file_inode(file); struct super_block *sb = vdir->i_sb; ntfs_inode *ndir = NTFS_I(vdir); ntfs_volume *vol = NTFS_SB(sb); @@ -1116,33 +1115,16 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir) u8 *kaddr, *bmp, *index_end; ntfs_attr_search_ctx *ctx; - fpos = filp->f_pos; ntfs_debug("Entering for inode 0x%lx, fpos 0x%llx.", - vdir->i_ino, fpos); + vdir->i_ino, actor->pos); rc = err = 0; /* Are we at end of dir yet? */ i_size = i_size_read(vdir); - if (fpos >= i_size + vol->mft_record_size) - goto done; + if (actor->pos >= i_size + vol->mft_record_size) + return 0; /* Emulate . and .. for all directories. */ - if (!fpos) { - ntfs_debug("Calling filldir for . with len 1, fpos 0x0, " - "inode 0x%lx, DT_DIR.", vdir->i_ino); - rc = filldir(dirent, ".", 1, fpos, vdir->i_ino, DT_DIR); - if (rc) - goto done; - fpos++; - } - if (fpos == 1) { - ntfs_debug("Calling filldir for .. 
with len 2, fpos 0x1, " - "inode 0x%lx, DT_DIR.", - (unsigned long)parent_ino(filp->f_path.dentry)); - rc = filldir(dirent, "..", 2, fpos, - parent_ino(filp->f_path.dentry), DT_DIR); - if (rc) - goto done; - fpos++; - } + if (!dir_emit_dots(file, actor)) + return 0; m = NULL; ctx = NULL; /* @@ -1155,7 +1137,7 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir) goto err_out; } /* Are we jumping straight into the index allocation attribute? */ - if (fpos >= vol->mft_record_size) + if (actor->pos >= vol->mft_record_size) goto skip_index_root; /* Get hold of the mft record for the directory. */ m = map_mft_record(ndir); @@ -1170,7 +1152,7 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir) goto err_out; } /* Get the offset into the index root attribute. */ - ir_pos = (s64)fpos; + ir_pos = (s64)actor->pos; /* Find the index root attribute in the mft record. */ err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE, 0, NULL, 0, ctx); @@ -1226,10 +1208,9 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (ir_pos > (u8*)ie - (u8*)ir) continue; /* Advance the position even if going to skip the entry. */ - fpos = (u8*)ie - (u8*)ir; + actor->pos = (u8*)ie - (u8*)ir; /* Submit the name to the filldir callback. */ - rc = ntfs_filldir(vol, fpos, ndir, NULL, ie, name, dirent, - filldir); + rc = ntfs_filldir(vol, ndir, NULL, ie, name, actor); if (rc) { kfree(ir); goto abort; @@ -1242,12 +1223,12 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (!NInoIndexAllocPresent(ndir)) goto EOD; /* Advance fpos to the beginning of the index allocation. */ - fpos = vol->mft_record_size; + actor->pos = vol->mft_record_size; skip_index_root: kaddr = NULL; prev_ia_pos = -1LL; /* Get the offset into the index allocation attribute. */ - ia_pos = (s64)fpos - vol->mft_record_size; + ia_pos = (s64)actor->pos - vol->mft_record_size; ia_mapping = vdir->i_mapping; ntfs_debug("Inode 0x%lx, getting index bitmap.", vdir->i_ino); bmp_vi = ntfs_attr_iget(vdir, AT_BITMAP, I30, 4); @@ -1409,7 +1390,7 @@ find_next_index_buffer: if (ia_pos - ia_start > (u8*)ie - (u8*)ia) continue; /* Advance the position even if going to skip the entry. */ - fpos = (u8*)ie - (u8*)ia + + actor->pos = (u8*)ie - (u8*)ia + (sle64_to_cpu(ia->index_block_vcn) << ndir->itype.index.vcn_size_bits) + vol->mft_record_size; @@ -1419,8 +1400,7 @@ find_next_index_buffer: * before returning, unless a non-zero value is returned in * which case the page is left unlocked. */ - rc = ntfs_filldir(vol, fpos, ndir, ia_page, ie, name, dirent, - filldir); + rc = ntfs_filldir(vol, ndir, ia_page, ie, name, actor); if (rc) { /* @ia_page is already unlocked in this case. */ ntfs_unmap_page(ia_page); @@ -1439,18 +1419,9 @@ unm_EOD: iput(bmp_vi); EOD: /* We are finished, set fpos to EOD. */ - fpos = i_size + vol->mft_record_size; + actor->pos = i_size + vol->mft_record_size; abort: kfree(name); -done: -#ifdef DEBUG - if (!rc) - ntfs_debug("EOD, fpos 0x%llx, returning 0.", fpos); - else - ntfs_debug("filldir returned %i, fpos 0x%llx, returning 0.", - rc, fpos); -#endif - filp->f_pos = fpos; return 0; err_out: if (bmp_page) { @@ -1471,7 +1442,6 @@ iput_err_out: if (!err) err = -EIO; ntfs_debug("Failed. Returning error code %i.", -err); - filp->f_pos = fpos; return err; } @@ -1571,7 +1541,7 @@ static int ntfs_dir_fsync(struct file *filp, loff_t start, loff_t end, const struct file_operations ntfs_dir_ops = { .llseek = generic_file_llseek, /* Seek inside directory. 
*/ .read = generic_read_dir, /* Return -EISDIR. */ - .readdir = ntfs_readdir, /* Read directory contents. */ + .iterate = ntfs_readdir, /* Read directory contents. */ #ifdef NTFS_RW .fsync = ntfs_dir_fsync, /* Sync a directory to disk. */ /*.aio_fsync = ,*/ /* Sync all outstanding async diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index b8a9d87231b1..17e6bdde96c5 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -5655,7 +5655,7 @@ int ocfs2_remove_btree_range(struct inode *inode, &ref_tree, NULL); if (ret) { mlog_errno(ret); - goto out; + goto bail; } ret = ocfs2_prepare_refcount_change_for_del(inode, @@ -5666,7 +5666,7 @@ int ocfs2_remove_btree_range(struct inode *inode, &extra_blocks); if (ret < 0) { mlog_errno(ret); - goto out; + goto bail; } } @@ -5674,7 +5674,7 @@ int ocfs2_remove_btree_range(struct inode *inode, extra_blocks); if (ret) { mlog_errno(ret); - return ret; + goto bail; } mutex_lock(&tl_inode->i_mutex); @@ -5734,7 +5734,7 @@ out_commit: ocfs2_commit_trans(osb, handle); out: mutex_unlock(&tl_inode->i_mutex); - +bail: if (meta_ac) ocfs2_free_alloc_context(meta_ac); diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 20dfec72e903..79736a28d84f 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -603,11 +603,12 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, * from ext3. PageChecked() bits have been removed as OCFS2 does not * do journalled data. */ -static void ocfs2_invalidatepage(struct page *page, unsigned long offset) +static void ocfs2_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal; - jbd2_journal_invalidatepage(journal, page, offset); + jbd2_journal_invalidatepage(journal, page, offset, length); } static int ocfs2_releasepage(struct page *page, gfp_t wait) diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 42252bf64b51..5c1c864e81cc 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -176,7 +176,7 @@ static void o2hb_dead_threshold_set(unsigned int threshold) } } -static int o2hb_global_hearbeat_mode_set(unsigned int hb_mode) +static int o2hb_global_heartbeat_mode_set(unsigned int hb_mode) { int ret = -1; @@ -500,7 +500,7 @@ static int o2hb_issue_node_write(struct o2hb_region *reg, } atomic_inc(&write_wc->wc_num_reqs); - submit_bio(WRITE, bio); + submit_bio(WRITE_SYNC, bio); status = 0; bail: @@ -2271,7 +2271,7 @@ ssize_t o2hb_heartbeat_group_mode_store(struct o2hb_heartbeat_group *group, if (strnicmp(page, o2hb_heartbeat_mode_desc[i], len)) continue; - ret = o2hb_global_hearbeat_mode_set(i); + ret = o2hb_global_heartbeat_mode_set(i); if (!ret) printk(KERN_NOTICE "o2hb: Heartbeat mode set to %s\n", o2hb_heartbeat_mode_desc[i]); @@ -2304,7 +2304,7 @@ static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = { NULL, }; -static struct configfs_item_operations o2hb_hearbeat_group_item_ops = { +static struct configfs_item_operations o2hb_heartbeat_group_item_ops = { .show_attribute = o2hb_heartbeat_group_show, .store_attribute = o2hb_heartbeat_group_store, }; @@ -2316,7 +2316,7 @@ static struct configfs_group_operations o2hb_heartbeat_group_group_ops = { static struct config_item_type o2hb_heartbeat_group_type = { .ct_group_ops = &o2hb_heartbeat_group_group_ops, - .ct_item_ops = &o2hb_hearbeat_group_item_ops, + .ct_item_ops = &o2hb_heartbeat_group_item_ops, .ct_attrs = o2hb_heartbeat_group_attrs, .ct_owner = THIS_MODULE, }; @@ -2389,6 +2389,9 @@ static int o2hb_region_pin(const char 
*region_uuid) assert_spin_locked(&o2hb_live_lock); list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { + if (reg->hr_item_dropped) + continue; + uuid = config_item_name(®->hr_item); /* local heartbeat */ @@ -2439,6 +2442,9 @@ static void o2hb_region_unpin(const char *region_uuid) assert_spin_locked(&o2hb_live_lock); list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { + if (reg->hr_item_dropped) + continue; + uuid = config_item_name(®->hr_item); if (region_uuid) { if (strcmp(region_uuid, uuid)) @@ -2654,6 +2660,9 @@ int o2hb_get_all_regions(char *region_uuids, u8 max_regions) p = region_uuids; list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { + if (reg->hr_item_dropped) + continue; + mlog(0, "Region: %s\n", config_item_name(®->hr_item)); if (numregs < max_regions) { memcpy(p, config_item_name(®->hr_item), diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c index c19897d0fe14..1ec141e758d7 100644 --- a/fs/ocfs2/cluster/quorum.c +++ b/fs/ocfs2/cluster/quorum.c @@ -264,7 +264,7 @@ void o2quo_hb_still_up(u8 node) /* This is analogous to hb_up. as a node's connection comes up we delay the * quorum decision until we see it heartbeating. the hold will be droped in * hb_up or hb_down. it might be perpetuated by con_err until hb_down. if - * it's already heartbeating we we might be dropping a hold that conn_up got. + * it's already heartbeating we might be dropping a hold that conn_up got. * */ void o2quo_conn_up(u8 node) { diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index aa88bd8bcedc..d644dc611425 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -406,6 +406,9 @@ static void sc_kref_release(struct kref *kref) sc->sc_node = NULL; o2net_debug_del_sc(sc); + + if (sc->sc_page) + __free_page(sc->sc_page); kfree(sc); } @@ -630,19 +633,19 @@ static void o2net_state_change(struct sock *sk) state_change = sc->sc_state_change; switch(sk->sk_state) { - /* ignore connecting sockets as they make progress */ - case TCP_SYN_SENT: - case TCP_SYN_RECV: - break; - case TCP_ESTABLISHED: - o2net_sc_queue_work(sc, &sc->sc_connect_work); - break; - default: - printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT - " shutdown, state %d\n", - SC_NODEF_ARGS(sc), sk->sk_state); - o2net_sc_queue_work(sc, &sc->sc_shutdown_work); - break; + /* ignore connecting sockets as they make progress */ + case TCP_SYN_SENT: + case TCP_SYN_RECV: + break; + case TCP_ESTABLISHED: + o2net_sc_queue_work(sc, &sc->sc_connect_work); + break; + default: + printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT + " shutdown, state %d\n", + SC_NODEF_ARGS(sc), sk->sk_state); + o2net_sc_queue_work(sc, &sc->sc_shutdown_work); + break; } out: read_unlock(&sk->sk_callback_lock); diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index f1e1aed8f638..eb760d8acd50 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -1761,11 +1761,10 @@ bail: static int ocfs2_dir_foreach_blk_id(struct inode *inode, u64 *f_version, - loff_t *f_pos, void *priv, - filldir_t filldir, int *filldir_err) + struct dir_context *ctx) { - int ret, i, filldir_ret; - unsigned long offset = *f_pos; + int ret, i; + unsigned long offset = ctx->pos; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_inline_data *data; @@ -1781,8 +1780,7 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode, di = (struct ocfs2_dinode *)di_bh->b_data; data = &di->id2.i_data; - while (*f_pos < i_size_read(inode)) { -revalidate: + while (ctx->pos < i_size_read(inode)) { /* If the dir block has changed since the last 
call to * readdir(2), then we might be pointing to an invalid * dirent right now. Scan from the start of the block @@ -1802,50 +1800,31 @@ revalidate: break; i += le16_to_cpu(de->rec_len); } - *f_pos = offset = i; + ctx->pos = offset = i; *f_version = inode->i_version; } - de = (struct ocfs2_dir_entry *) (data->id_data + *f_pos); - if (!ocfs2_check_dir_entry(inode, de, di_bh, *f_pos)) { + de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos); + if (!ocfs2_check_dir_entry(inode, de, di_bh, ctx->pos)) { /* On error, skip the f_pos to the end. */ - *f_pos = i_size_read(inode); - goto out; + ctx->pos = i_size_read(inode); + break; } offset += le16_to_cpu(de->rec_len); if (le64_to_cpu(de->inode)) { - /* We might block in the next section - * if the data destination is - * currently swapped out. So, use a - * version stamp to detect whether or - * not the directory has been modified - * during the copy operation. - */ - u64 version = *f_version; unsigned char d_type = DT_UNKNOWN; if (de->file_type < OCFS2_FT_MAX) d_type = ocfs2_filetype_table[de->file_type]; - filldir_ret = filldir(priv, de->name, - de->name_len, - *f_pos, - le64_to_cpu(de->inode), - d_type); - if (filldir_ret) { - if (filldir_err) - *filldir_err = filldir_ret; - break; - } - if (version != *f_version) - goto revalidate; + if (!dir_emit(ctx, de->name, de->name_len, + le64_to_cpu(de->inode), d_type)) + goto out; } - *f_pos += le16_to_cpu(de->rec_len); + ctx->pos += le16_to_cpu(de->rec_len); } - out: brelse(di_bh); - return 0; } @@ -1855,27 +1834,26 @@ out: */ static int ocfs2_dir_foreach_blk_el(struct inode *inode, u64 *f_version, - loff_t *f_pos, void *priv, - filldir_t filldir, int *filldir_err) + struct dir_context *ctx, + bool persist) { - int error = 0; unsigned long offset, blk, last_ra_blk = 0; - int i, stored; + int i; struct buffer_head * bh, * tmp; struct ocfs2_dir_entry * de; struct super_block * sb = inode->i_sb; unsigned int ra_sectors = 16; + int stored = 0; - stored = 0; bh = NULL; - offset = (*f_pos) & (sb->s_blocksize - 1); + offset = ctx->pos & (sb->s_blocksize - 1); - while (!error && !stored && *f_pos < i_size_read(inode)) { - blk = (*f_pos) >> sb->s_blocksize_bits; + while (ctx->pos < i_size_read(inode)) { + blk = ctx->pos >> sb->s_blocksize_bits; if (ocfs2_read_dir_block(inode, blk, &bh, 0)) { /* Skip the corrupt dirblock and keep trying */ - *f_pos += sb->s_blocksize - offset; + ctx->pos += sb->s_blocksize - offset; continue; } @@ -1897,7 +1875,6 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode, ra_sectors = 8; } -revalidate: /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid * dirent right now. Scan from the start of the block @@ -1917,93 +1894,64 @@ revalidate: i += le16_to_cpu(de->rec_len); } offset = i; - *f_pos = ((*f_pos) & ~(sb->s_blocksize - 1)) + ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1)) | offset; *f_version = inode->i_version; } - while (!error && *f_pos < i_size_read(inode) + while (ctx->pos < i_size_read(inode) && offset < sb->s_blocksize) { de = (struct ocfs2_dir_entry *) (bh->b_data + offset); if (!ocfs2_check_dir_entry(inode, de, bh, offset)) { /* On error, skip the f_pos to the next block. */ - *f_pos = ((*f_pos) | (sb->s_blocksize - 1)) + 1; + ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1; brelse(bh); - goto out; + continue; } - offset += le16_to_cpu(de->rec_len); if (le64_to_cpu(de->inode)) { - /* We might block in the next section - * if the data destination is - * currently swapped out. 
So, use a - * version stamp to detect whether or - * not the directory has been modified - * during the copy operation. - */ - unsigned long version = *f_version; unsigned char d_type = DT_UNKNOWN; if (de->file_type < OCFS2_FT_MAX) d_type = ocfs2_filetype_table[de->file_type]; - error = filldir(priv, de->name, + if (!dir_emit(ctx, de->name, de->name_len, - *f_pos, le64_to_cpu(de->inode), - d_type); - if (error) { - if (filldir_err) - *filldir_err = error; - break; + d_type)) { + brelse(bh); + return 0; } - if (version != *f_version) - goto revalidate; - stored ++; + stored++; } - *f_pos += le16_to_cpu(de->rec_len); + offset += le16_to_cpu(de->rec_len); + ctx->pos += le16_to_cpu(de->rec_len); } offset = 0; brelse(bh); bh = NULL; + if (!persist && stored) + break; } - - stored = 0; -out: - return stored; + return 0; } static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version, - loff_t *f_pos, void *priv, filldir_t filldir, - int *filldir_err) + struct dir_context *ctx, + bool persist) { if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) - return ocfs2_dir_foreach_blk_id(inode, f_version, f_pos, priv, - filldir, filldir_err); - - return ocfs2_dir_foreach_blk_el(inode, f_version, f_pos, priv, filldir, - filldir_err); + return ocfs2_dir_foreach_blk_id(inode, f_version, ctx); + return ocfs2_dir_foreach_blk_el(inode, f_version, ctx, persist); } /* * This is intended to be called from inside other kernel functions, * so we fake some arguments. */ -int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv, - filldir_t filldir) +int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx) { - int ret = 0, filldir_err = 0; u64 version = inode->i_version; - - while (*f_pos < i_size_read(inode)) { - ret = ocfs2_dir_foreach_blk(inode, &version, f_pos, priv, - filldir, &filldir_err); - if (ret || filldir_err) - break; - } - - if (ret > 0) - ret = -EIO; - + ocfs2_dir_foreach_blk(inode, &version, ctx, true); return 0; } @@ -2011,15 +1959,15 @@ int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv, * ocfs2_readdir() * */ -int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir) +int ocfs2_readdir(struct file *file, struct dir_context *ctx) { int error = 0; - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); int lock_level = 0; trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno); - error = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level); + error = ocfs2_inode_lock_atime(inode, file->f_path.mnt, &lock_level); if (lock_level && error >= 0) { /* We release EX lock which used to update atime * and get PR lock again to reduce contention @@ -2035,8 +1983,7 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir) goto bail_nolock; } - error = ocfs2_dir_foreach_blk(inode, &filp->f_version, &filp->f_pos, - dirent, filldir, NULL); + error = ocfs2_dir_foreach_blk(inode, &file->f_version, ctx, false); ocfs2_inode_unlock(inode, lock_level); if (error) @@ -2120,6 +2067,7 @@ bail: } struct ocfs2_empty_dir_priv { + struct dir_context ctx; unsigned seen_dot; unsigned seen_dot_dot; unsigned seen_other; @@ -2204,8 +2152,9 @@ out: int ocfs2_empty_dir(struct inode *inode) { int ret; - loff_t start = 0; - struct ocfs2_empty_dir_priv priv; + struct ocfs2_empty_dir_priv priv = { + .ctx.actor = ocfs2_empty_dir_filldir + }; memset(&priv, 0, sizeof(priv)); @@ -2219,7 +2168,7 @@ int ocfs2_empty_dir(struct inode *inode) */ } - ret = ocfs2_dir_foreach(inode, &start, &priv, 
ocfs2_empty_dir_filldir); + ret = ocfs2_dir_foreach(inode, &priv.ctx); if (ret) mlog_errno(ret); diff --git a/fs/ocfs2/dir.h b/fs/ocfs2/dir.h index e683f3deb645..f0344b75b14d 100644 --- a/fs/ocfs2/dir.h +++ b/fs/ocfs2/dir.h @@ -92,9 +92,8 @@ int ocfs2_find_files_on_disk(const char *name, struct ocfs2_dir_lookup_result *res); int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name, int namelen, u64 *blkno); -int ocfs2_readdir(struct file *filp, void *dirent, filldir_t filldir); -int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv, - filldir_t filldir); +int ocfs2_readdir(struct file *file, struct dir_context *ctx); +int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx); int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb, struct inode *dir, struct buffer_head *parent_fe_bh, diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 975810b98492..47e67c2d228f 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c @@ -178,6 +178,7 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm, lock->ml.node); } } else { + status = DLM_NORMAL; dlm_lock_get(lock); list_add_tail(&lock->list, &res->blocked); kick_thread = 1; diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index b3fdd1a323d6..773bd32bfd8c 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -55,9 +55,6 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node); static int dlm_recovery_thread(void *data); -void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); -int dlm_launch_recovery_thread(struct dlm_ctxt *dlm); -void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); static int dlm_do_recovery(struct dlm_ctxt *dlm); static int dlm_pick_recovery_master(struct dlm_ctxt *dlm); @@ -789,7 +786,7 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, u8 dead_node) { struct dlm_lock_request lr; - enum dlm_status ret; + int ret; mlog(0, "\n"); @@ -802,7 +799,6 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, lr.dead_node = dead_node; // send message - ret = DLM_NOLOCKMGR; ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key, &lr, sizeof(lr), request_from, NULL); @@ -1408,6 +1404,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data, mres->lockname_len, mres->lockname); ret = -EFAULT; spin_unlock(&res->spinlock); + dlm_lockres_put(res); goto leave; } res->state |= DLM_LOCK_RES_MIGRATING; @@ -2695,6 +2692,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, dlm->name, br->node_idx, br->dead_node, dlm->reco.dead_node, dlm->reco.new_master); spin_unlock(&dlm->spinlock); + dlm_put(dlm); return -EAGAIN; } spin_unlock(&dlm->spinlock); diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index ff54014a24ec..41000f223ca4 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2646,17 +2646,7 @@ static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence) goto out; } - if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) - ret = -EINVAL; - if (!ret && offset > inode->i_sb->s_maxbytes) - ret = -EINVAL; - if (ret) - goto out; - - if (offset != file->f_pos) { - file->f_pos = offset; - file->f_version = 0; - } + offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); out: mutex_unlock(&inode->i_mutex); @@ -2712,7 +2702,7 @@ const struct file_operations ocfs2_fops = { const struct file_operations ocfs2_dops = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = ocfs2_readdir, + .iterate = 
ocfs2_readdir, .fsync = ocfs2_sync_file, .release = ocfs2_dir_release, .open = ocfs2_dir_open, @@ -2759,7 +2749,7 @@ const struct file_operations ocfs2_fops_no_plocks = { const struct file_operations ocfs2_dops_no_plocks = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = ocfs2_readdir, + .iterate = ocfs2_readdir, .fsync = ocfs2_sync_file, .release = ocfs2_dir_release, .open = ocfs2_dir_open, diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 8eccfabcd12e..242170d83971 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1941,6 +1941,7 @@ void ocfs2_orphan_scan_start(struct ocfs2_super *osb) } struct ocfs2_orphan_filldir_priv { + struct dir_context ctx; struct inode *head; struct ocfs2_super *osb; }; @@ -1977,11 +1978,11 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb, { int status; struct inode *orphan_dir_inode = NULL; - struct ocfs2_orphan_filldir_priv priv; - loff_t pos = 0; - - priv.osb = osb; - priv.head = *head; + struct ocfs2_orphan_filldir_priv priv = { + .ctx.actor = ocfs2_orphan_filldir, + .osb = osb, + .head = *head + }; orphan_dir_inode = ocfs2_get_system_file_inode(osb, ORPHAN_DIR_SYSTEM_INODE, @@ -1999,8 +2000,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb, goto out; } - status = ocfs2_dir_foreach(orphan_dir_inode, &pos, &priv, - ocfs2_orphan_filldir); + status = ocfs2_dir_foreach(orphan_dir_inode, &priv.ctx); if (status) { mlog_errno(status); goto out_cluster; diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index a3385b63ff5e..96f9ac237e86 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -200,7 +200,6 @@ void ocfs2_complete_quota_recovery(struct ocfs2_super *osb); static inline void ocfs2_start_checkpoint(struct ocfs2_super *osb) { - atomic_set(&osb->needs_checkpoint, 1); wake_up(&osb->checkpoint_event); } @@ -538,7 +537,7 @@ static inline int ocfs2_calc_extend_credits(struct super_block *sb, extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth); return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks + - ocfs2_quota_trans_credits(sb); + ocfs2_quota_trans_credits(sb) + bits_wanted; } static inline int ocfs2_calc_symlink_credits(struct super_block *sb) diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 04ee1b57c243..be3f8676a438 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -522,7 +522,7 @@ static int __ocfs2_mknod_locked(struct inode *dir, fe->i_last_eb_blk = 0; strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE); - le32_add_cpu(&fe->i_flags, OCFS2_VALID_FL); + fe->i_flags |= cpu_to_le32(OCFS2_VALID_FL); fe->i_atime = fe->i_ctime = fe->i_mtime = cpu_to_le64(CURRENT_TIME.tv_sec); fe->i_mtime_nsec = fe->i_ctime_nsec = fe->i_atime_nsec = @@ -773,7 +773,7 @@ static int ocfs2_remote_dentry_delete(struct dentry *dentry) return ret; } -static inline int inode_is_unlinkable(struct inode *inode) +static inline int ocfs2_inode_is_unlinkable(struct inode *inode) { if (S_ISDIR(inode->i_mode)) { if (inode->i_nlink == 2) @@ -791,6 +791,7 @@ static int ocfs2_unlink(struct inode *dir, { int status; int child_locked = 0; + bool is_unlinkable = false; struct inode *inode = dentry->d_inode; struct inode *orphan_dir = NULL; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); @@ -865,7 +866,7 @@ static int ocfs2_unlink(struct inode *dir, goto leave; } - if (inode_is_unlinkable(inode)) { + if (ocfs2_inode_is_unlinkable(inode)) { status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, OCFS2_I(inode)->ip_blkno, orphan_name, &orphan_insert); @@ -873,6 +874,7 @@ static int ocfs2_unlink(struct inode *dir, 
			mlog_errno(status);
 			goto leave;
 		}
+		is_unlinkable = true;
 	}

 	handle = ocfs2_start_trans(osb, ocfs2_unlink_credits(osb->sb));
@@ -892,15 +894,6 @@ static int ocfs2_unlink(struct inode *dir,
 	fe = (struct ocfs2_dinode *) fe_bh->b_data;

-	if (inode_is_unlinkable(inode)) {
-		status = ocfs2_orphan_add(osb, handle, inode, fe_bh, orphan_name,
-					  &orphan_insert, orphan_dir);
-		if (status < 0) {
-			mlog_errno(status);
-			goto leave;
-		}
-	}
-
 	/* delete the name from the parent dir */
 	status = ocfs2_delete_entry(handle, dir, &lookup);
 	if (status < 0) {
@@ -923,6 +916,14 @@ static int ocfs2_unlink(struct inode *dir,
 		mlog_errno(status);
 		if (S_ISDIR(inode->i_mode))
 			inc_nlink(dir);
+		goto leave;
+	}
+
+	if (is_unlinkable) {
+		status = ocfs2_orphan_add(osb, handle, inode, fe_bh,
+				orphan_name, &orphan_insert, orphan_dir);
+		if (status < 0)
+			mlog_errno(status);
 	}

 leave:
@@ -947,7 +948,7 @@ leave:
 	ocfs2_free_dir_lookup_result(&orphan_insert);
 	ocfs2_free_dir_lookup_result(&lookup);

-	if (status)
+	if (status && (status != -ENOTEMPTY))
 		mlog_errno(status);

 	return status;
@@ -2012,6 +2013,21 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
 		goto leave;
 	}

+	/*
+	 * We're going to journal the change of i_flags and i_orphaned_slot.
+	 * It's safe anyway, though some callers may duplicate the journaling.
+	 * Journaling within the func just makes the logic look more
+	 * straightforward.
+	 */
+	status = ocfs2_journal_access_di(handle,
+					 INODE_CACHE(inode),
+					 fe_bh,
+					 OCFS2_JOURNAL_ACCESS_WRITE);
+	if (status < 0) {
+		mlog_errno(status);
+		goto leave;
+	}
+
 	/* we're a cluster, and nlink can change on disk from
 	 * underneath us... */
 	orphan_fe = (struct ocfs2_dinode *) orphan_dir_bh->b_data;
@@ -2026,25 +2042,10 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
 					    orphan_dir_bh, lookup);
 	if (status < 0) {
 		mlog_errno(status);
-		goto leave;
+		goto rollback;
 	}

-	/*
-	 * We're going to journal the change of i_flags and i_orphaned_slot.
-	 * It's safe anyway, though some callers may duplicate the journaling.
-	 * Journaling within the func just makes the logic look more
- */ - status = ocfs2_journal_access_di(handle, - INODE_CACHE(inode), - fe_bh, - OCFS2_JOURNAL_ACCESS_WRITE); - if (status < 0) { - mlog_errno(status); - goto leave; - } - - le32_add_cpu(&fe->i_flags, OCFS2_ORPHANED_FL); + fe->i_flags |= cpu_to_le32(OCFS2_ORPHANED_FL); OCFS2_I(inode)->ip_flags &= ~OCFS2_INODE_SKIP_ORPHAN_DIR; /* Record which orphan dir our inode now resides @@ -2057,11 +2058,16 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, trace_ocfs2_orphan_add_end((unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num); +rollback: + if (status < 0) { + if (S_ISDIR(inode->i_mode)) + ocfs2_add_links_count(orphan_fe, -1); + set_nlink(orphan_dir_inode, ocfs2_read_links_count(orphan_fe)); + } + leave: brelse(orphan_dir_bh); - if (status) - mlog_errno(status); return status; } @@ -2216,7 +2222,7 @@ out: brelse(orphan_dir_bh); - return 0; + return ret; } int ocfs2_create_inode_in_orphan(struct inode *dir, @@ -2434,7 +2440,7 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, } di = (struct ocfs2_dinode *)di_bh->b_data; - le32_add_cpu(&di->i_flags, -OCFS2_ORPHANED_FL); + di->i_flags &= ~cpu_to_le32(OCFS2_ORPHANED_FL); di->i_orphaned_slot = 0; set_nlink(inode, 1); ocfs2_set_links_count(di, inode->i_nlink); diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index d355e6e36b36..3a903470c794 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -347,7 +347,6 @@ struct ocfs2_super struct task_struct *recovery_thread_task; int disable_recovery; wait_queue_head_t checkpoint_event; - atomic_t needs_checkpoint; struct ocfs2_journal *journal; unsigned long osb_commit_interval; diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index b7e74b580c0f..5397c07ce608 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -1422,7 +1422,7 @@ static int ocfs2_relink_block_group(handle_t *handle, int status; /* there is a really tiny chance the journal calls could fail, * but we wouldn't want inconsistent blocks in *any* case. 
*/ - u64 fe_ptr, bg_ptr, prev_bg_ptr; + u64 bg_ptr, prev_bg_ptr; struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data; struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; struct ocfs2_group_desc *prev_bg = (struct ocfs2_group_desc *) prev_bg_bh->b_data; @@ -1437,51 +1437,44 @@ static int ocfs2_relink_block_group(handle_t *handle, (unsigned long long)le64_to_cpu(bg->bg_blkno), (unsigned long long)le64_to_cpu(prev_bg->bg_blkno)); - fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno); bg_ptr = le64_to_cpu(bg->bg_next_group); prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group); status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), prev_bg_bh, OCFS2_JOURNAL_ACCESS_WRITE); - if (status < 0) { - mlog_errno(status); - goto out_rollback; - } + if (status < 0) + goto out; prev_bg->bg_next_group = bg->bg_next_group; ocfs2_journal_dirty(handle, prev_bg_bh); status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), bg_bh, OCFS2_JOURNAL_ACCESS_WRITE); - if (status < 0) { - mlog_errno(status); - goto out_rollback; - } + if (status < 0) + goto out_rollback_prev_bg; bg->bg_next_group = fe->id2.i_chain.cl_recs[chain].c_blkno; ocfs2_journal_dirty(handle, bg_bh); status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode), fe_bh, OCFS2_JOURNAL_ACCESS_WRITE); - if (status < 0) { - mlog_errno(status); - goto out_rollback; - } + if (status < 0) + goto out_rollback_bg; fe->id2.i_chain.cl_recs[chain].c_blkno = bg->bg_blkno; ocfs2_journal_dirty(handle, fe_bh); -out_rollback: - if (status < 0) { - fe->id2.i_chain.cl_recs[chain].c_blkno = cpu_to_le64(fe_ptr); - bg->bg_next_group = cpu_to_le64(bg_ptr); - prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr); - } - - if (status) +out: + if (status < 0) mlog_errno(status); return status; + +out_rollback_bg: + bg->bg_next_group = cpu_to_le64(bg_ptr); +out_rollback_prev_bg: + prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr); + goto out; } static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg, diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 01b85165552b..854d80955bf8 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -286,10 +286,9 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len) spin_unlock(&osb->osb_lock); out += snprintf(buf + out, len - out, - "%10s => Pid: %d Interval: %lu Needs: %d\n", "Commit", + "%10s => Pid: %d Interval: %lu\n", "Commit", (osb->commit_task ? 
task_pid_nr(osb->commit_task) : -1), - osb->osb_commit_interval, - atomic_read(&osb->needs_checkpoint)); + osb->osb_commit_interval); out += snprintf(buf + out, len - out, "%10s => State: %d TxnId: %lu NumTxns: %d\n", @@ -2154,7 +2153,6 @@ static int ocfs2_initialize_super(struct super_block *sb, } init_waitqueue_head(&osb->checkpoint_event); - atomic_set(&osb->needs_checkpoint, 0); osb->s_atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM; diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 2e3ea308c144..317ef0abccbb 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -2751,7 +2751,6 @@ static int ocfs2_xattr_ibody_set(struct inode *inode, { int ret; struct ocfs2_inode_info *oi = OCFS2_I(inode); - struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; struct ocfs2_xa_loc loc; if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) @@ -2759,13 +2758,6 @@ static int ocfs2_xattr_ibody_set(struct inode *inode, down_write(&oi->ip_alloc_sem); if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) { - if (!ocfs2_xattr_has_space_inline(inode, di)) { - ret = -ENOSPC; - goto out; - } - } - - if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) { ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt); if (ret) { if (ret != -ENOSPC) @@ -6499,6 +6491,16 @@ static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args) } new_oi = OCFS2_I(args->new_inode); + /* + * Adjust extent record count to reserve space for extended attribute. + * Inline data count had been adjusted in ocfs2_duplicate_inline_data(). + */ + if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) && + !(ocfs2_inode_is_fast_symlink(args->new_inode))) { + struct ocfs2_extent_list *el = &new_di->id2.i_list; + le16_add_cpu(&el->l_count, -(inline_size / + sizeof(struct ocfs2_extent_rec))); + } spin_lock(&new_oi->ip_lock); new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL; new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features); diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c index acbaebcad3a8..1b8e9e8405b2 100644 --- a/fs/omfs/dir.c +++ b/fs/omfs/dir.c @@ -327,26 +327,23 @@ int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header, return is_bad; } -static int omfs_fill_chain(struct file *filp, void *dirent, filldir_t filldir, +static bool omfs_fill_chain(struct inode *dir, struct dir_context *ctx, u64 fsblock, int hindex) { - struct inode *dir = file_inode(filp); - struct buffer_head *bh; - struct omfs_inode *oi; - u64 self; - int res = 0; - unsigned char d_type; - /* follow chain in this bucket */ while (fsblock != ~0) { - bh = omfs_bread(dir->i_sb, fsblock); + struct buffer_head *bh = omfs_bread(dir->i_sb, fsblock); + struct omfs_inode *oi; + u64 self; + unsigned char d_type; + if (!bh) - goto out; + return true; oi = (struct omfs_inode *) bh->b_data; if (omfs_is_bad(OMFS_SB(dir->i_sb), &oi->i_head, fsblock)) { brelse(bh); - goto out; + return true; } self = fsblock; @@ -361,15 +358,16 @@ static int omfs_fill_chain(struct file *filp, void *dirent, filldir_t filldir, d_type = (oi->i_type == OMFS_DIR) ? 
DT_DIR : DT_REG; - res = filldir(dirent, oi->i_name, strnlen(oi->i_name, - OMFS_NAMELEN), filp->f_pos, self, d_type); + if (!dir_emit(ctx, oi->i_name, + strnlen(oi->i_name, OMFS_NAMELEN), + self, d_type)) { + brelse(bh); + return false; + } brelse(bh); - if (res < 0) - break; - filp->f_pos++; + ctx->pos++; } -out: - return res; + return true; } static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry, @@ -403,60 +401,44 @@ out: return err; } -static int omfs_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int omfs_readdir(struct file *file, struct dir_context *ctx) { - struct inode *dir = file_inode(filp); + struct inode *dir = file_inode(file); struct buffer_head *bh; - loff_t offset, res; + __be64 *p; unsigned int hchain, hindex; int nbuckets; - u64 fsblock; - int ret = -EINVAL; - - if (filp->f_pos >> 32) - goto success; - - switch ((unsigned long) filp->f_pos) { - case 0: - if (filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR) < 0) - goto success; - filp->f_pos++; - /* fall through */ - case 1: - if (filldir(dirent, "..", 2, 1, - parent_ino(filp->f_dentry), DT_DIR) < 0) - goto success; - filp->f_pos = 1 << 20; - /* fall through */ + + if (ctx->pos >> 32) + return -EINVAL; + + if (ctx->pos < 1 << 20) { + if (!dir_emit_dots(file, ctx)) + return 0; + ctx->pos = 1 << 20; } nbuckets = (dir->i_size - OMFS_DIR_START) / 8; /* high 12 bits store bucket + 1 and low 20 bits store hash index */ - hchain = (filp->f_pos >> 20) - 1; - hindex = filp->f_pos & 0xfffff; + hchain = (ctx->pos >> 20) - 1; + hindex = ctx->pos & 0xfffff; bh = omfs_bread(dir->i_sb, dir->i_ino); if (!bh) - goto out; + return -EINVAL; - offset = OMFS_DIR_START + hchain * 8; + p = (__be64 *)(bh->b_data + OMFS_DIR_START) + hchain; - for (; hchain < nbuckets; hchain++, offset += 8) { - fsblock = be64_to_cpu(*((__be64 *) &bh->b_data[offset])); - - res = omfs_fill_chain(filp, dirent, filldir, fsblock, hindex); - hindex = 0; - if (res < 0) + for (; hchain < nbuckets; hchain++) { + __u64 fsblock = be64_to_cpu(*p++); + if (!omfs_fill_chain(dir, ctx, fsblock, hindex)) break; - - filp->f_pos = (hchain+2) << 20; + hindex = 0; + ctx->pos = (hchain+2) << 20; } brelse(bh); -success: - ret = 0; -out: - return ret; + return 0; } const struct inode_operations omfs_dir_inops = { @@ -470,6 +452,6 @@ const struct inode_operations omfs_dir_inops = { const struct file_operations omfs_dir_operations = { .read = generic_read_dir, - .readdir = omfs_readdir, + .iterate = omfs_readdir, .llseek = generic_file_llseek, }; diff --git a/fs/open.c b/fs/open.c index 8c741002f947..fca72c4d3f17 100644 --- a/fs/open.c +++ b/fs/open.c @@ -840,11 +840,15 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o if (flags & __O_SYNC) flags |= O_DSYNC; - /* - * If we have O_PATH in the open flag. Then we - * cannot have anything other than the below set of flags - */ - if (flags & O_PATH) { + if (flags & O_TMPFILE) { + if (!(flags & O_CREAT)) + return -EINVAL; + acc_mode = MAY_OPEN | ACC_MODE(flags); + } else if (flags & O_PATH) { + /* + * If we have O_PATH in the open flag. 
Then we + * cannot have anything other than the below set of flags + */ flags &= O_DIRECTORY | O_NOFOLLOW | O_PATH; acc_mode = 0; } else { @@ -876,7 +880,8 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o lookup_flags |= LOOKUP_DIRECTORY; if (!(flags & O_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; - return lookup_flags; + op->lookup_flags = lookup_flags; + return 0; } /** @@ -893,8 +898,8 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o struct file *file_open_name(struct filename *name, int flags, umode_t mode) { struct open_flags op; - int lookup = build_open_flags(flags, mode, &op); - return do_filp_open(AT_FDCWD, name, &op, lookup); + int err = build_open_flags(flags, mode, &op); + return err ? ERR_PTR(err) : do_filp_open(AT_FDCWD, name, &op); } /** @@ -919,37 +924,43 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt, const char *filename, int flags) { struct open_flags op; - int lookup = build_open_flags(flags, 0, &op); + int err = build_open_flags(flags, 0, &op); + if (err) + return ERR_PTR(err); if (flags & O_CREAT) return ERR_PTR(-EINVAL); if (!filename && (flags & O_DIRECTORY)) if (!dentry->d_inode->i_op->lookup) return ERR_PTR(-ENOTDIR); - return do_file_open_root(dentry, mnt, filename, &op, lookup); + return do_file_open_root(dentry, mnt, filename, &op); } EXPORT_SYMBOL(file_open_root); long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode) { struct open_flags op; - int lookup = build_open_flags(flags, mode, &op); - struct filename *tmp = getname(filename); - int fd = PTR_ERR(tmp); - - if (!IS_ERR(tmp)) { - fd = get_unused_fd_flags(flags); - if (fd >= 0) { - struct file *f = do_filp_open(dfd, tmp, &op, lookup); - if (IS_ERR(f)) { - put_unused_fd(fd); - fd = PTR_ERR(f); - } else { - fsnotify_open(f); - fd_install(fd, f); - } + int fd = build_open_flags(flags, mode, &op); + struct filename *tmp; + + if (fd) + return fd; + + tmp = getname(filename); + if (IS_ERR(tmp)) + return PTR_ERR(tmp); + + fd = get_unused_fd_flags(flags); + if (fd >= 0) { + struct file *f = do_filp_open(dfd, tmp, &op); + if (IS_ERR(f)) { + put_unused_fd(fd); + fd = PTR_ERR(f); + } else { + fsnotify_open(f); + fd_install(fd, f); } - putname(tmp); } + putname(tmp); return fd; } diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c index 75885ffde44e..8c0ceb8dd1f7 100644 --- a/fs/openpromfs/inode.c +++ b/fs/openpromfs/inode.c @@ -162,11 +162,11 @@ static const struct file_operations openpromfs_prop_ops = { .release = seq_release, }; -static int openpromfs_readdir(struct file *, void *, filldir_t); +static int openpromfs_readdir(struct file *, struct dir_context *); static const struct file_operations openprom_operations = { .read = generic_read_dir, - .readdir = openpromfs_readdir, + .iterate = openpromfs_readdir, .llseek = generic_file_llseek, }; @@ -260,71 +260,64 @@ found: return NULL; } -static int openpromfs_readdir(struct file * filp, void * dirent, filldir_t filldir) +static int openpromfs_readdir(struct file *file, struct dir_context *ctx) { - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); struct op_inode_info *oi = OP_I(inode); struct device_node *dp = oi->u.node; struct device_node *child; struct property *prop; - unsigned int ino; int i; mutex_lock(&op_mutex); - ino = inode->i_ino; - i = filp->f_pos; - switch (i) { - case 0: - if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) + if (ctx->pos == 0) { + if (!dir_emit(ctx, ".", 1, inode->i_ino, DT_DIR)) 
goto out; - i++; - filp->f_pos++; - /* fall thru */ - case 1: - if (filldir(dirent, "..", 2, i, + ctx->pos = 1; + } + if (ctx->pos == 1) { + if (!dir_emit(ctx, "..", 2, (dp->parent == NULL ? OPENPROM_ROOT_INO : - dp->parent->unique_id), DT_DIR) < 0) + dp->parent->unique_id), DT_DIR)) goto out; - i++; - filp->f_pos++; - /* fall thru */ - default: - i -= 2; - - /* First, the children nodes as directories. */ - child = dp->child; - while (i && child) { - child = child->sibling; - i--; - } - while (child) { - if (filldir(dirent, - child->path_component_name, - strlen(child->path_component_name), - filp->f_pos, child->unique_id, DT_DIR) < 0) - goto out; - - filp->f_pos++; - child = child->sibling; - } + ctx->pos = 2; + } + i = ctx->pos - 2; - /* Next, the properties as files. */ - prop = dp->properties; - while (i && prop) { - prop = prop->next; - i--; - } - while (prop) { - if (filldir(dirent, prop->name, strlen(prop->name), - filp->f_pos, prop->unique_id, DT_REG) < 0) - goto out; + /* First, the children nodes as directories. */ + child = dp->child; + while (i && child) { + child = child->sibling; + i--; + } + while (child) { + if (!dir_emit(ctx, + child->path_component_name, + strlen(child->path_component_name), + child->unique_id, DT_DIR)) + goto out; - filp->f_pos++; - prop = prop->next; - } + ctx->pos++; + child = child->sibling; + } + + /* Next, the properties as files. */ + prop = dp->properties; + while (i && prop) { + prop = prop->next; + i--; } + while (prop) { + if (!dir_emit(ctx, prop->name, strlen(prop->name), + prop->unique_id, DT_REG)) + goto out; + + ctx->pos++; + prop = prop->next; + } + out: mutex_unlock(&op_mutex); return 0; diff --git a/fs/proc/base.c b/fs/proc/base.c index c3834dad09b3..1485e38daaa3 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1681,46 +1681,34 @@ const struct dentry_operations pid_dentry_operations = * reported by readdir in sync with the inode numbers reported * by stat. 
*/ -int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir, +bool proc_fill_cache(struct file *file, struct dir_context *ctx, const char *name, int len, instantiate_t instantiate, struct task_struct *task, const void *ptr) { - struct dentry *child, *dir = filp->f_path.dentry; + struct dentry *child, *dir = file->f_path.dentry; + struct qstr qname = QSTR_INIT(name, len); struct inode *inode; - struct qstr qname; - ino_t ino = 0; - unsigned type = DT_UNKNOWN; - - qname.name = name; - qname.len = len; - qname.hash = full_name_hash(name, len); + unsigned type; + ino_t ino; - child = d_lookup(dir, &qname); + child = d_hash_and_lookup(dir, &qname); if (!child) { - struct dentry *new; - new = d_alloc(dir, &qname); - if (new) { - child = instantiate(dir->d_inode, new, task, ptr); - if (child) - dput(new); - else - child = new; + child = d_alloc(dir, &qname); + if (!child) + goto end_instantiate; + if (instantiate(dir->d_inode, child, task, ptr) < 0) { + dput(child); + goto end_instantiate; } } - if (!child || IS_ERR(child) || !child->d_inode) - goto end_instantiate; inode = child->d_inode; - if (inode) { - ino = inode->i_ino; - type = inode->i_mode >> 12; - } + ino = inode->i_ino; + type = inode->i_mode >> 12; dput(child); + return dir_emit(ctx, name, len, ino, type); + end_instantiate: - if (!ino) - ino = find_inode_number(dir, &qname); - if (!ino) - ino = 1; - return filldir(dirent, name, len, filp->f_pos, ino, type); + return dir_emit(ctx, name, len, 1, DT_UNKNOWN); } #ifdef CONFIG_CHECKPOINT_RESTORE @@ -1846,7 +1834,7 @@ struct map_files_info { unsigned char name[4*sizeof(long)+2]; /* max: %lx-%lx\0 */ }; -static struct dentry * +static int proc_map_files_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) { @@ -1856,7 +1844,7 @@ proc_map_files_instantiate(struct inode *dir, struct dentry *dentry, inode = proc_pid_make_inode(dir->i_sb, task); if (!inode) - return ERR_PTR(-ENOENT); + return -ENOENT; ei = PROC_I(inode); ei->op.proc_get_link = proc_map_files_get_link; @@ -1873,7 +1861,7 @@ proc_map_files_instantiate(struct inode *dir, struct dentry *dentry, d_set_d_op(dentry, &tid_map_files_dentry_operations); d_add(dentry, inode); - return NULL; + return 0; } static struct dentry *proc_map_files_lookup(struct inode *dir, @@ -1882,23 +1870,23 @@ static struct dentry *proc_map_files_lookup(struct inode *dir, unsigned long vm_start, vm_end; struct vm_area_struct *vma; struct task_struct *task; - struct dentry *result; + int result; struct mm_struct *mm; - result = ERR_PTR(-EPERM); + result = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out; - result = ERR_PTR(-ENOENT); + result = -ENOENT; task = get_proc_task(dir); if (!task) goto out; - result = ERR_PTR(-EACCES); + result = -EACCES; if (!ptrace_may_access(task, PTRACE_MODE_READ)) goto out_put_task; - result = ERR_PTR(-ENOENT); + result = -ENOENT; if (dname_to_vma_addr(dentry, &vm_start, &vm_end)) goto out_put_task; @@ -1921,7 +1909,7 @@ out_no_vma: out_put_task: put_task_struct(task); out: - return result; + return ERR_PTR(result); } static const struct inode_operations proc_map_files_inode_operations = { @@ -1931,14 +1919,15 @@ static const struct inode_operations proc_map_files_inode_operations = { }; static int -proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir) +proc_map_files_readdir(struct file *file, struct dir_context *ctx) { - struct dentry *dentry = filp->f_path.dentry; - struct inode *inode = dentry->d_inode; struct vm_area_struct *vma; struct task_struct 
*task; struct mm_struct *mm; - ino_t ino; + unsigned long nr_files, pos, i; + struct flex_array *fa = NULL; + struct map_files_info info; + struct map_files_info *p; int ret; ret = -EPERM; @@ -1946,7 +1935,7 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir) goto out; ret = -ENOENT; - task = get_proc_task(inode); + task = get_proc_task(file_inode(file)); if (!task) goto out; @@ -1955,91 +1944,73 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir) goto out_put_task; ret = 0; - switch (filp->f_pos) { - case 0: - ino = inode->i_ino; - if (filldir(dirent, ".", 1, 0, ino, DT_DIR) < 0) - goto out_put_task; - filp->f_pos++; - case 1: - ino = parent_ino(dentry); - if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) - goto out_put_task; - filp->f_pos++; - default: - { - unsigned long nr_files, pos, i; - struct flex_array *fa = NULL; - struct map_files_info info; - struct map_files_info *p; - - mm = get_task_mm(task); - if (!mm) - goto out_put_task; - down_read(&mm->mmap_sem); + if (!dir_emit_dots(file, ctx)) + goto out_put_task; - nr_files = 0; + mm = get_task_mm(task); + if (!mm) + goto out_put_task; + down_read(&mm->mmap_sem); - /* - * We need two passes here: - * - * 1) Collect vmas of mapped files with mmap_sem taken - * 2) Release mmap_sem and instantiate entries - * - * otherwise we get lockdep complained, since filldir() - * routine might require mmap_sem taken in might_fault(). - */ + nr_files = 0; - for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) { - if (vma->vm_file && ++pos > filp->f_pos) - nr_files++; - } + /* + * We need two passes here: + * + * 1) Collect vmas of mapped files with mmap_sem taken + * 2) Release mmap_sem and instantiate entries + * + * otherwise we get lockdep complained, since filldir() + * routine might require mmap_sem taken in might_fault(). 
+ */ - if (nr_files) { - fa = flex_array_alloc(sizeof(info), nr_files, - GFP_KERNEL); - if (!fa || flex_array_prealloc(fa, 0, nr_files, - GFP_KERNEL)) { - ret = -ENOMEM; - if (fa) - flex_array_free(fa); - up_read(&mm->mmap_sem); - mmput(mm); - goto out_put_task; - } - for (i = 0, vma = mm->mmap, pos = 2; vma; - vma = vma->vm_next) { - if (!vma->vm_file) - continue; - if (++pos <= filp->f_pos) - continue; - - info.mode = vma->vm_file->f_mode; - info.len = snprintf(info.name, - sizeof(info.name), "%lx-%lx", - vma->vm_start, vma->vm_end); - if (flex_array_put(fa, i++, &info, GFP_KERNEL)) - BUG(); - } + for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) { + if (vma->vm_file && ++pos > ctx->pos) + nr_files++; + } + + if (nr_files) { + fa = flex_array_alloc(sizeof(info), nr_files, + GFP_KERNEL); + if (!fa || flex_array_prealloc(fa, 0, nr_files, + GFP_KERNEL)) { + ret = -ENOMEM; + if (fa) + flex_array_free(fa); + up_read(&mm->mmap_sem); + mmput(mm); + goto out_put_task; } - up_read(&mm->mmap_sem); - - for (i = 0; i < nr_files; i++) { - p = flex_array_get(fa, i); - ret = proc_fill_cache(filp, dirent, filldir, - p->name, p->len, - proc_map_files_instantiate, - task, - (void *)(unsigned long)p->mode); - if (ret) - break; - filp->f_pos++; + for (i = 0, vma = mm->mmap, pos = 2; vma; + vma = vma->vm_next) { + if (!vma->vm_file) + continue; + if (++pos <= ctx->pos) + continue; + + info.mode = vma->vm_file->f_mode; + info.len = snprintf(info.name, + sizeof(info.name), "%lx-%lx", + vma->vm_start, vma->vm_end); + if (flex_array_put(fa, i++, &info, GFP_KERNEL)) + BUG(); } - if (fa) - flex_array_free(fa); - mmput(mm); } + up_read(&mm->mmap_sem); + + for (i = 0; i < nr_files; i++) { + p = flex_array_get(fa, i); + if (!proc_fill_cache(file, ctx, + p->name, p->len, + proc_map_files_instantiate, + task, + (void *)(unsigned long)p->mode)) + break; + ctx->pos++; } + if (fa) + flex_array_free(fa); + mmput(mm); out_put_task: put_task_struct(task); @@ -2049,7 +2020,7 @@ out: static const struct file_operations proc_map_files_operations = { .read = generic_read_dir, - .readdir = proc_map_files_readdir, + .iterate = proc_map_files_readdir, .llseek = default_llseek, }; @@ -2152,13 +2123,12 @@ static const struct file_operations proc_timers_operations = { }; #endif /* CONFIG_CHECKPOINT_RESTORE */ -static struct dentry *proc_pident_instantiate(struct inode *dir, +static int proc_pident_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) { const struct pid_entry *p = ptr; struct inode *inode; struct proc_inode *ei; - struct dentry *error = ERR_PTR(-ENOENT); inode = proc_pid_make_inode(dir->i_sb, task); if (!inode) @@ -2177,9 +2147,9 @@ static struct dentry *proc_pident_instantiate(struct inode *dir, d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ if (pid_revalidate(dentry, 0)) - error = NULL; + return 0; out: - return error; + return -ENOENT; } static struct dentry *proc_pident_lookup(struct inode *dir, @@ -2187,11 +2157,11 @@ static struct dentry *proc_pident_lookup(struct inode *dir, const struct pid_entry *ents, unsigned int nents) { - struct dentry *error; + int error; struct task_struct *task = get_proc_task(dir); const struct pid_entry *p, *last; - error = ERR_PTR(-ENOENT); + error = -ENOENT; if (!task) goto out_no_task; @@ -2214,70 +2184,33 @@ static struct dentry *proc_pident_lookup(struct inode *dir, out: put_task_struct(task); out_no_task: - return error; -} - -static int proc_pident_fill_cache(struct file *filp, void 
*dirent, - filldir_t filldir, struct task_struct *task, const struct pid_entry *p) -{ - return proc_fill_cache(filp, dirent, filldir, p->name, p->len, - proc_pident_instantiate, task, p); + return ERR_PTR(error); } -static int proc_pident_readdir(struct file *filp, - void *dirent, filldir_t filldir, +static int proc_pident_readdir(struct file *file, struct dir_context *ctx, const struct pid_entry *ents, unsigned int nents) { - int i; - struct dentry *dentry = filp->f_path.dentry; - struct inode *inode = dentry->d_inode; - struct task_struct *task = get_proc_task(inode); - const struct pid_entry *p, *last; - ino_t ino; - int ret; + struct task_struct *task = get_proc_task(file_inode(file)); + const struct pid_entry *p; - ret = -ENOENT; if (!task) - goto out_no_task; + return -ENOENT; - ret = 0; - i = filp->f_pos; - switch (i) { - case 0: - ino = inode->i_ino; - if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) - goto out; - i++; - filp->f_pos++; - /* fall through */ - case 1: - ino = parent_ino(dentry); - if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0) - goto out; - i++; - filp->f_pos++; - /* fall through */ - default: - i -= 2; - if (i >= nents) { - ret = 1; - goto out; - } - p = ents + i; - last = &ents[nents - 1]; - while (p <= last) { - if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0) - goto out; - filp->f_pos++; - p++; - } - } + if (!dir_emit_dots(file, ctx)) + goto out; + + if (ctx->pos >= nents + 2) + goto out; - ret = 1; + for (p = ents + (ctx->pos - 2); p <= ents + nents - 1; p++) { + if (!proc_fill_cache(file, ctx, p->name, p->len, + proc_pident_instantiate, task, p)) + break; + ctx->pos++; + } out: put_task_struct(task); -out_no_task: - return ret; + return 0; } #ifdef CONFIG_SECURITY @@ -2362,16 +2295,15 @@ static const struct pid_entry attr_dir_stuff[] = { REG("sockcreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations), }; -static int proc_attr_dir_readdir(struct file * filp, - void * dirent, filldir_t filldir) +static int proc_attr_dir_readdir(struct file *file, struct dir_context *ctx) { - return proc_pident_readdir(filp,dirent,filldir, - attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff)); + return proc_pident_readdir(file, ctx, + attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff)); } static const struct file_operations proc_attr_dir_operations = { .read = generic_read_dir, - .readdir = proc_attr_dir_readdir, + .iterate = proc_attr_dir_readdir, .llseek = default_llseek, }; @@ -2725,16 +2657,15 @@ static const struct pid_entry tgid_base_stuff[] = { #endif }; -static int proc_tgid_base_readdir(struct file * filp, - void * dirent, filldir_t filldir) +static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) { - return proc_pident_readdir(filp,dirent,filldir, - tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff)); + return proc_pident_readdir(file, ctx, + tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff)); } static const struct file_operations proc_tgid_base_operations = { .read = generic_read_dir, - .readdir = proc_tgid_base_readdir, + .iterate = proc_tgid_base_readdir, .llseek = default_llseek, }; @@ -2836,11 +2767,10 @@ void proc_flush_task(struct task_struct *task) } } -static struct dentry *proc_pid_instantiate(struct inode *dir, - struct dentry * dentry, - struct task_struct *task, const void *ptr) +static int proc_pid_instantiate(struct inode *dir, + struct dentry * dentry, + struct task_struct *task, const void *ptr) { - struct dentry *error = ERR_PTR(-ENOENT); struct inode *inode; inode = proc_pid_make_inode(dir->i_sb, task); @@ -2860,14 +2790,14 @@ static struct 
dentry *proc_pid_instantiate(struct inode *dir, d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ if (pid_revalidate(dentry, 0)) - error = NULL; + return 0; out: - return error; + return -ENOENT; } struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags) { - struct dentry *result = NULL; + int result = 0; struct task_struct *task; unsigned tgid; struct pid_namespace *ns; @@ -2888,7 +2818,7 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign result = proc_pid_instantiate(dir, dentry, task, NULL); put_task_struct(task); out: - return result; + return ERR_PTR(result); } /* @@ -2936,58 +2866,42 @@ retry: #define TGID_OFFSET (FIRST_PROCESS_ENTRY + 1) -static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir, - struct tgid_iter iter) -{ - char name[PROC_NUMBUF]; - int len = snprintf(name, sizeof(name), "%d", iter.tgid); - return proc_fill_cache(filp, dirent, filldir, name, len, - proc_pid_instantiate, iter.task, NULL); -} - -static int fake_filldir(void *buf, const char *name, int namelen, - loff_t offset, u64 ino, unsigned d_type) -{ - return 0; -} - /* for the /proc/ directory itself, after non-process stuff has been done */ -int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) +int proc_pid_readdir(struct file *file, struct dir_context *ctx) { struct tgid_iter iter; - struct pid_namespace *ns; - filldir_t __filldir; - loff_t pos = filp->f_pos; + struct pid_namespace *ns = file->f_dentry->d_sb->s_fs_info; + loff_t pos = ctx->pos; if (pos >= PID_MAX_LIMIT + TGID_OFFSET) - goto out; + return 0; if (pos == TGID_OFFSET - 1) { - if (proc_fill_cache(filp, dirent, filldir, "self", 4, - NULL, NULL, NULL) < 0) - goto out; + struct inode *inode = ns->proc_self->d_inode; + if (!dir_emit(ctx, "self", 4, inode->i_ino, DT_LNK)) + return 0; iter.tgid = 0; } else { iter.tgid = pos - TGID_OFFSET; } iter.task = NULL; - ns = filp->f_dentry->d_sb->s_fs_info; for (iter = next_tgid(ns, iter); iter.task; iter.tgid += 1, iter = next_tgid(ns, iter)) { - if (has_pid_permissions(ns, iter.task, 2)) - __filldir = filldir; - else - __filldir = fake_filldir; + char name[PROC_NUMBUF]; + int len; + if (!has_pid_permissions(ns, iter.task, 2)) + continue; - filp->f_pos = iter.tgid + TGID_OFFSET; - if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) { + len = snprintf(name, sizeof(name), "%d", iter.tgid); + ctx->pos = iter.tgid + TGID_OFFSET; + if (!proc_fill_cache(file, ctx, name, len, + proc_pid_instantiate, iter.task, NULL)) { put_task_struct(iter.task); - goto out; + return 0; } } - filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET; -out: + ctx->pos = PID_MAX_LIMIT + TGID_OFFSET; return 0; } @@ -3075,11 +2989,10 @@ static const struct pid_entry tid_base_stuff[] = { #endif }; -static int proc_tid_base_readdir(struct file * filp, - void * dirent, filldir_t filldir) +static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx) { - return proc_pident_readdir(filp,dirent,filldir, - tid_base_stuff,ARRAY_SIZE(tid_base_stuff)); + return proc_pident_readdir(file, ctx, + tid_base_stuff, ARRAY_SIZE(tid_base_stuff)); } static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) @@ -3090,7 +3003,7 @@ static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *den static const struct file_operations proc_tid_base_operations = { .read = generic_read_dir, - .readdir = proc_tid_base_readdir, + .iterate = 
proc_tid_base_readdir, .llseek = default_llseek, }; @@ -3100,10 +3013,9 @@ static const struct inode_operations proc_tid_base_inode_operations = { .setattr = proc_setattr, }; -static struct dentry *proc_task_instantiate(struct inode *dir, +static int proc_task_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) { - struct dentry *error = ERR_PTR(-ENOENT); struct inode *inode; inode = proc_pid_make_inode(dir->i_sb, task); @@ -3122,14 +3034,14 @@ static struct dentry *proc_task_instantiate(struct inode *dir, d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ if (pid_revalidate(dentry, 0)) - error = NULL; + return 0; out: - return error; + return -ENOENT; } static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags) { - struct dentry *result = ERR_PTR(-ENOENT); + int result = -ENOENT; struct task_struct *task; struct task_struct *leader = get_proc_task(dir); unsigned tid; @@ -3159,7 +3071,7 @@ out_drop_task: out: put_task_struct(leader); out_no_task: - return result; + return ERR_PTR(result); } /* @@ -3231,30 +3143,16 @@ static struct task_struct *next_tid(struct task_struct *start) return pos; } -static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir, - struct task_struct *task, int tid) -{ - char name[PROC_NUMBUF]; - int len = snprintf(name, sizeof(name), "%d", tid); - return proc_fill_cache(filp, dirent, filldir, name, len, - proc_task_instantiate, task, NULL); -} - /* for the /proc/TGID/task/ directories */ -static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir) +static int proc_task_readdir(struct file *file, struct dir_context *ctx) { - struct dentry *dentry = filp->f_path.dentry; - struct inode *inode = dentry->d_inode; struct task_struct *leader = NULL; - struct task_struct *task; - int retval = -ENOENT; - ino_t ino; - int tid; + struct task_struct *task = get_proc_task(file_inode(file)); struct pid_namespace *ns; + int tid; - task = get_proc_task(inode); if (!task) - goto out_no_task; + return -ENOENT; rcu_read_lock(); if (pid_alive(task)) { leader = task->group_leader; @@ -3263,46 +3161,36 @@ static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldi rcu_read_unlock(); put_task_struct(task); if (!leader) - goto out_no_task; - retval = 0; + return -ENOENT; - switch ((unsigned long)filp->f_pos) { - case 0: - ino = inode->i_ino; - if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) < 0) - goto out; - filp->f_pos++; - /* fall through */ - case 1: - ino = parent_ino(dentry); - if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) < 0) - goto out; - filp->f_pos++; - /* fall through */ - } + if (!dir_emit_dots(file, ctx)) + goto out; /* f_version caches the tgid value that the last readdir call couldn't * return. lseek aka telldir automagically resets f_version to 0. 
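[Editor's note: the readdir() conversions on both sides of this hunk all reduce to the same skeleton — the filldir callback and the hand-rolled "."/".." switch become dir_emit_dots() plus dir_emit() against a struct dir_context, with ctx->pos as the only cursor. A minimal sketch of that skeleton, not code from this patch; the example_iterate name and the two-entry table are hypothetical:]

    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/string.h>

    static int example_iterate(struct file *file, struct dir_context *ctx)
    {
        static const char * const names[] = { "alpha", "beta" };
        unsigned int i;

        /* emits "." at pos 0 and ".." at pos 1, advancing ctx->pos */
        if (!dir_emit_dots(file, ctx))
            return 0;
        for (i = ctx->pos - 2; i < ARRAY_SIZE(names); i++) {
            /*
             * dir_emit() returns false once the user buffer is full;
             * ctx->pos still names the unemitted entry, so the next
             * getdents() call resumes exactly here.
             */
            if (!dir_emit(ctx, names[i], strlen(names[i]),
                          i + 2 /* ino */, DT_REG))
                return 0;
            ctx->pos++;
        }
        return 0;
    }

[The f_version trick described in the surrounding comment survives the conversion untouched; only the cursor moves from filp->f_pos to ctx->pos.]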
*/ - ns = filp->f_dentry->d_sb->s_fs_info; - tid = (int)filp->f_version; - filp->f_version = 0; - for (task = first_tid(leader, tid, filp->f_pos - 2, ns); + ns = file->f_dentry->d_sb->s_fs_info; + tid = (int)file->f_version; + file->f_version = 0; + for (task = first_tid(leader, tid, ctx->pos - 2, ns); task; - task = next_tid(task), filp->f_pos++) { + task = next_tid(task), ctx->pos++) { + char name[PROC_NUMBUF]; + int len; tid = task_pid_nr_ns(task, ns); - if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) { + len = snprintf(name, sizeof(name), "%d", tid); + if (!proc_fill_cache(file, ctx, name, len, + proc_task_instantiate, task, NULL)) { /* returning this tgid failed, save it as the first * pid for the next readir call */ - filp->f_version = (u64)tid; + file->f_version = (u64)tid; put_task_struct(task); break; } } out: put_task_struct(leader); -out_no_task: - return retval; + return 0; } static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) @@ -3328,6 +3216,6 @@ static const struct inode_operations proc_task_inode_operations = { static const struct file_operations proc_task_operations = { .read = generic_read_dir, - .readdir = proc_task_readdir, + .iterate = proc_task_readdir, .llseek = default_llseek, }; diff --git a/fs/proc/fd.c b/fs/proc/fd.c index d7a4a28ef630..75f2890abbd8 100644 --- a/fs/proc/fd.c +++ b/fs/proc/fd.c @@ -167,11 +167,10 @@ static int proc_fd_link(struct dentry *dentry, struct path *path) return ret; } -static struct dentry * +static int proc_fd_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) { - struct dentry *error = ERR_PTR(-ENOENT); unsigned fd = (unsigned long)ptr; struct proc_inode *ei; struct inode *inode; @@ -194,9 +193,9 @@ proc_fd_instantiate(struct inode *dir, struct dentry *dentry, /* Close the race of the process dying before we return the dentry */ if (tid_fd_revalidate(dentry, 0)) - error = NULL; + return 0; out: - return error; + return -ENOENT; } static struct dentry *proc_lookupfd_common(struct inode *dir, @@ -204,7 +203,7 @@ static struct dentry *proc_lookupfd_common(struct inode *dir, instantiate_t instantiate) { struct task_struct *task = get_proc_task(dir); - struct dentry *result = ERR_PTR(-ENOENT); + int result = -ENOENT; unsigned fd = name_to_int(dentry); if (!task) @@ -216,77 +215,61 @@ static struct dentry *proc_lookupfd_common(struct inode *dir, out: put_task_struct(task); out_no_task: - return result; + return ERR_PTR(result); } -static int proc_readfd_common(struct file * filp, void * dirent, - filldir_t filldir, instantiate_t instantiate) +static int proc_readfd_common(struct file *file, struct dir_context *ctx, + instantiate_t instantiate) { - struct dentry *dentry = filp->f_path.dentry; - struct inode *inode = dentry->d_inode; - struct task_struct *p = get_proc_task(inode); + struct task_struct *p = get_proc_task(file_inode(file)); struct files_struct *files; - unsigned int fd, ino; - int retval; + unsigned int fd; - retval = -ENOENT; if (!p) - goto out_no_task; - retval = 0; - - fd = filp->f_pos; - switch (fd) { - case 0: - if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) - goto out; - filp->f_pos++; - case 1: - ino = parent_ino(dentry); - if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) - goto out; - filp->f_pos++; - default: - files = get_files_struct(p); - if (!files) - goto out; - rcu_read_lock(); - for (fd = filp->f_pos - 2; - fd < files_fdtable(files)->max_fds; - fd++, filp->f_pos++) { - char name[PROC_NUMBUF]; - int 
len; - int rv; - - if (!fcheck_files(files, fd)) - continue; - rcu_read_unlock(); + return -ENOENT; - len = snprintf(name, sizeof(name), "%d", fd); - rv = proc_fill_cache(filp, dirent, filldir, - name, len, instantiate, p, - (void *)(unsigned long)fd); - if (rv < 0) - goto out_fd_loop; - rcu_read_lock(); - } - rcu_read_unlock(); -out_fd_loop: - put_files_struct(files); + if (!dir_emit_dots(file, ctx)) + goto out; + files = get_files_struct(p); + if (!files) + goto out; + + rcu_read_lock(); + for (fd = ctx->pos - 2; + fd < files_fdtable(files)->max_fds; + fd++, ctx->pos++) { + char name[PROC_NUMBUF]; + int len; + + if (!fcheck_files(files, fd)) + continue; + rcu_read_unlock(); + + len = snprintf(name, sizeof(name), "%d", fd); + if (!proc_fill_cache(file, ctx, + name, len, instantiate, p, + (void *)(unsigned long)fd)) + goto out_fd_loop; + rcu_read_lock(); } + rcu_read_unlock(); +out_fd_loop: + put_files_struct(files); out: put_task_struct(p); -out_no_task: - return retval; + return 0; } -static int proc_readfd(struct file *filp, void *dirent, filldir_t filldir) +static int proc_readfd(struct file *file, struct dir_context *ctx) { - return proc_readfd_common(filp, dirent, filldir, proc_fd_instantiate); + return proc_readfd_common(file, ctx, proc_fd_instantiate); } const struct file_operations proc_fd_operations = { .read = generic_read_dir, - .readdir = proc_readfd, + .iterate = proc_readfd, .llseek = default_llseek, }; @@ -316,11 +299,10 @@ const struct inode_operations proc_fd_inode_operations = { .setattr = proc_setattr, }; -static struct dentry * +static int proc_fdinfo_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) { - struct dentry *error = ERR_PTR(-ENOENT); unsigned fd = (unsigned long)ptr; struct proc_inode *ei; struct inode *inode; @@ -340,9 +322,9 @@ proc_fdinfo_instantiate(struct inode *dir, struct dentry *dentry, /* Close the race of the process dying before we return the dentry */ if (tid_fd_revalidate(dentry, 0)) - error = NULL; + return 0; out: - return error; + return -ENOENT; } static struct dentry * @@ -351,9 +333,9 @@ proc_lookupfdinfo(struct inode *dir, struct dentry *dentry, unsigned int flags) { return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate); } -static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir) +static int proc_readfdinfo(struct file *file, struct dir_context *ctx) { - return proc_readfd_common(filp, dirent, filldir, + return proc_readfd_common(file, ctx, proc_fdinfo_instantiate); } @@ -364,6 +346,6 @@ const struct inode_operations proc_fdinfo_inode_operations = { const struct file_operations proc_fdinfo_operations = { .read = generic_read_dir, - .readdir = proc_readfdinfo, + .iterate = proc_readfdinfo, .llseek = default_llseek, }; diff --git a/fs/proc/generic.c b/fs/proc/generic.c index a2596afffae6..94441a407337 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -233,76 +233,52 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry, * value of the readdir() call, as long as it's non-negative * for success..
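[Editor's note: a second pattern runs through the lookup paths above — instantiate callbacks now return an int and every lookup wraps it exactly once in ERR_PTR(). This works because ERR_PTR(0) is NULL, so the success case still returns the NULL the VFS expects from a lookup that already called d_add(). Reduced to its shape; the example_* names are hypothetical:]

    #include <linux/err.h>

    static int example_instantiate(void)
    {
        /* 0 on success, -ENOENT if the task died under us */
        return -ENOENT;
    }

    static struct dentry *example_lookup(void)
    {
        int result = example_instantiate();

        /* ERR_PTR(0) == NULL: one return covers success and failure */
        return ERR_PTR(result);
    }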
*/ -int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent, - filldir_t filldir) +int proc_readdir_de(struct proc_dir_entry *de, struct file *file, + struct dir_context *ctx) { - unsigned int ino; int i; - struct inode *inode = file_inode(filp); - int ret = 0; - - ino = inode->i_ino; - i = filp->f_pos; - switch (i) { - case 0: - if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) - goto out; - i++; - filp->f_pos++; - /* fall through */ - case 1: - if (filldir(dirent, "..", 2, i, - parent_ino(filp->f_path.dentry), - DT_DIR) < 0) - goto out; - i++; - filp->f_pos++; - /* fall through */ - default: - spin_lock(&proc_subdir_lock); - de = de->subdir; - i -= 2; - for (;;) { - if (!de) { - ret = 1; - spin_unlock(&proc_subdir_lock); - goto out; - } - if (!i) - break; - de = de->next; - i--; - } - do { - struct proc_dir_entry *next; - - /* filldir passes info to user space */ - pde_get(de); - spin_unlock(&proc_subdir_lock); - if (filldir(dirent, de->name, de->namelen, filp->f_pos, - de->low_ino, de->mode >> 12) < 0) { - pde_put(de); - goto out; - } - spin_lock(&proc_subdir_lock); - filp->f_pos++; - next = de->next; - pde_put(de); - de = next; - } while (de); + if (!dir_emit_dots(file, ctx)) + return 0; + + spin_lock(&proc_subdir_lock); + de = de->subdir; + i = ctx->pos - 2; + for (;;) { + if (!de) { spin_unlock(&proc_subdir_lock); + return 0; + } + if (!i) + break; + de = de->next; + i--; } - ret = 1; -out: - return ret; + + do { + struct proc_dir_entry *next; + pde_get(de); + spin_unlock(&proc_subdir_lock); + if (!dir_emit(ctx, de->name, de->namelen, + de->low_ino, de->mode >> 12)) { + pde_put(de); + return 0; + } + spin_lock(&proc_subdir_lock); + ctx->pos++; + next = de->next; + pde_put(de); + de = next; + } while (de); + spin_unlock(&proc_subdir_lock); + return 0; } -int proc_readdir(struct file *filp, void *dirent, filldir_t filldir) +int proc_readdir(struct file *file, struct dir_context *ctx) { - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); - return proc_readdir_de(PDE(inode), filp, dirent, filldir); + return proc_readdir_de(PDE(inode), file, ctx); } /* @@ -313,7 +289,7 @@ int proc_readdir(struct file *filp, void *dirent, filldir_t filldir) static const struct file_operations proc_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = proc_readdir, + .iterate = proc_readdir, }; /* diff --git a/fs/proc/internal.h b/fs/proc/internal.h index d600fb098b6a..651d09a11dde 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -165,14 +165,14 @@ extern int proc_setattr(struct dentry *, struct iattr *); extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *); extern int pid_revalidate(struct dentry *, unsigned int); extern int pid_delete_dentry(const struct dentry *); -extern int proc_pid_readdir(struct file *, void *, filldir_t); +extern int proc_pid_readdir(struct file *, struct dir_context *); extern struct dentry *proc_pid_lookup(struct inode *, struct dentry *, unsigned int); extern loff_t mem_lseek(struct file *, loff_t, int); /* Lookups */ -typedef struct dentry *instantiate_t(struct inode *, struct dentry *, +typedef int instantiate_t(struct inode *, struct dentry *, struct task_struct *, const void *); -extern int proc_fill_cache(struct file *, void *, filldir_t, const char *, int, +extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, int, instantiate_t, struct task_struct *, const void *); /* @@ -183,8 +183,8 @@ extern spinlock_t 
proc_subdir_lock; extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int); extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *, struct dentry *); -extern int proc_readdir(struct file *, void *, filldir_t); -extern int proc_readdir_de(struct proc_dir_entry *, struct file *, void *, filldir_t); +extern int proc_readdir(struct file *, struct dir_context *); +extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *); static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde) { diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 0a22194e5d58..06ea155e1a59 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -408,7 +408,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff) prpsinfo.pr_zomb = 0; strcpy(prpsinfo.pr_fname, "vmlinux"); - strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ); + strlcpy(prpsinfo.pr_psargs, saved_command_line, sizeof(prpsinfo.pr_psargs)); nhdr->p_filesz += notesize(¬es[1]); bufp = storenote(¬es[1], bufp); diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c index bd4b5a740ff1..bdfabdaefdce 100644 --- a/fs/proc/kmsg.c +++ b/fs/proc/kmsg.c @@ -21,12 +21,12 @@ extern wait_queue_head_t log_wait; static int kmsg_open(struct inode * inode, struct file * file) { - return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE); + return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_PROC); } static int kmsg_release(struct inode * inode, struct file * file) { - (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE); + (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_PROC); return 0; } @@ -34,15 +34,15 @@ static ssize_t kmsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { if ((file->f_flags & O_NONBLOCK) && - !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE)) + !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC)) return -EAGAIN; - return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE); + return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_PROC); } static unsigned int kmsg_poll(struct file *file, poll_table *wait) { poll_wait(file, &log_wait, wait); - if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE)) + if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC)) return POLLIN | POLLRDNORM; return 0; } diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c index 54bdc6701e9f..49a7fff2e83a 100644 --- a/fs/proc/namespaces.c +++ b/fs/proc/namespaces.c @@ -187,13 +187,12 @@ static const struct inode_operations proc_ns_link_inode_operations = { .setattr = proc_setattr, }; -static struct dentry *proc_ns_instantiate(struct inode *dir, +static int proc_ns_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) { const struct proc_ns_operations *ns_ops = ptr; struct inode *inode; struct proc_inode *ei; - struct dentry *error = ERR_PTR(-ENOENT); inode = proc_pid_make_inode(dir->i_sb, task); if (!inode) @@ -208,90 +207,52 @@ static struct dentry *proc_ns_instantiate(struct inode *dir, d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ if (pid_revalidate(dentry, 0)) - error = NULL; + return 0; out: - return error; -} - -static int proc_ns_fill_cache(struct file *filp, void *dirent, - filldir_t filldir, struct task_struct *task, - const struct proc_ns_operations *ops) -{ - return proc_fill_cache(filp, dirent, filldir, - ops->name, strlen(ops->name), - proc_ns_instantiate, 
task, ops); + return -ENOENT; } -static int proc_ns_dir_readdir(struct file *filp, void *dirent, - filldir_t filldir) +static int proc_ns_dir_readdir(struct file *file, struct dir_context *ctx) { - int i; - struct dentry *dentry = filp->f_path.dentry; - struct inode *inode = dentry->d_inode; - struct task_struct *task = get_proc_task(inode); + struct task_struct *task = get_proc_task(file_inode(file)); const struct proc_ns_operations **entry, **last; - ino_t ino; - int ret; - ret = -ENOENT; if (!task) - goto out_no_task; + return -ENOENT; - ret = 0; - i = filp->f_pos; - switch (i) { - case 0: - ino = inode->i_ino; - if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) - goto out; - i++; - filp->f_pos++; - /* fall through */ - case 1: - ino = parent_ino(dentry); - if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0) - goto out; - i++; - filp->f_pos++; - /* fall through */ - default: - i -= 2; - if (i >= ARRAY_SIZE(ns_entries)) { - ret = 1; - goto out; - } - entry = ns_entries + i; - last = &ns_entries[ARRAY_SIZE(ns_entries) - 1]; - while (entry <= last) { - if (proc_ns_fill_cache(filp, dirent, filldir, - task, *entry) < 0) - goto out; - filp->f_pos++; - entry++; - } + if (!dir_emit_dots(file, ctx)) + goto out; + if (ctx->pos >= 2 + ARRAY_SIZE(ns_entries)) + goto out; + entry = ns_entries + (ctx->pos - 2); + last = &ns_entries[ARRAY_SIZE(ns_entries) - 1]; + while (entry <= last) { + const struct proc_ns_operations *ops = *entry; + if (!proc_fill_cache(file, ctx, ops->name, strlen(ops->name), + proc_ns_instantiate, task, ops)) + break; + ctx->pos++; + entry++; } - - ret = 1; out: put_task_struct(task); -out_no_task: - return ret; + return 0; } const struct file_operations proc_ns_dir_operations = { .read = generic_read_dir, - .readdir = proc_ns_dir_readdir, + .iterate = proc_ns_dir_readdir, }; static struct dentry *proc_ns_dir_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { - struct dentry *error; + int error; struct task_struct *task = get_proc_task(dir); const struct proc_ns_operations **entry, **last; unsigned int len = dentry->d_name.len; - error = ERR_PTR(-ENOENT); + error = -ENOENT; if (!task) goto out_no_task; @@ -310,7 +271,7 @@ static struct dentry *proc_ns_dir_lookup(struct inode *dir, out: put_task_struct(task); out_no_task: - return error; + return ERR_PTR(error); } const struct inode_operations proc_ns_dir_inode_operations = { diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index 986e83220d56..4677bb7dc7c2 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c @@ -160,16 +160,15 @@ const struct inode_operations proc_net_inode_operations = { .getattr = proc_tgid_net_getattr, }; -static int proc_tgid_net_readdir(struct file *filp, void *dirent, - filldir_t filldir) +static int proc_tgid_net_readdir(struct file *file, struct dir_context *ctx) { int ret; struct net *net; ret = -EINVAL; - net = get_proc_task_net(file_inode(filp)); + net = get_proc_task_net(file_inode(file)); if (net != NULL) { - ret = proc_readdir_de(net->proc_net, filp, dirent, filldir); + ret = proc_readdir_de(net->proc_net, file, ctx); put_net(net); } return ret; @@ -178,7 +177,7 @@ static int proc_tgid_net_readdir(struct file *filp, void *dirent, const struct file_operations proc_net_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = proc_tgid_net_readdir, + .iterate = proc_tgid_net_readdir, }; static __net_init int proc_net_ns_init(struct net *net) diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index ac05f33a0dde..71290463a1d3 100644 --- 
a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -573,12 +573,12 @@ out: return ret; } -static int proc_sys_fill_cache(struct file *filp, void *dirent, - filldir_t filldir, +static bool proc_sys_fill_cache(struct file *file, + struct dir_context *ctx, struct ctl_table_header *head, struct ctl_table *table) { - struct dentry *child, *dir = filp->f_path.dentry; + struct dentry *child, *dir = file->f_path.dentry; struct inode *inode; struct qstr qname; ino_t ino = 0; @@ -595,38 +595,38 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent, inode = proc_sys_make_inode(dir->d_sb, head, table); if (!inode) { dput(child); - return -ENOMEM; + return false; } else { d_set_d_op(child, &proc_sys_dentry_operations); d_add(child, inode); } } else { - return -ENOMEM; + return false; } } inode = child->d_inode; ino = inode->i_ino; type = inode->i_mode >> 12; dput(child); - return !!filldir(dirent, qname.name, qname.len, filp->f_pos, ino, type); + return dir_emit(ctx, qname.name, qname.len, ino, type); } -static int proc_sys_link_fill_cache(struct file *filp, void *dirent, - filldir_t filldir, +static bool proc_sys_link_fill_cache(struct file *file, + struct dir_context *ctx, struct ctl_table_header *head, struct ctl_table *table) { - int err, ret = 0; + bool ret = true; head = sysctl_head_grab(head); if (S_ISLNK(table->mode)) { /* It is not an error if we can not follow the link ignore it */ - err = sysctl_follow_link(&head, &table, current->nsproxy); + int err = sysctl_follow_link(&head, &table, current->nsproxy); if (err) goto out; } - ret = proc_sys_fill_cache(filp, dirent, filldir, head, table); + ret = proc_sys_fill_cache(file, ctx, head, table); out: sysctl_head_finish(head); return ret; @@ -634,67 +634,50 @@ out: static int scan(struct ctl_table_header *head, ctl_table *table, unsigned long *pos, struct file *file, - void *dirent, filldir_t filldir) + struct dir_context *ctx) { - int res; + bool res; - if ((*pos)++ < file->f_pos) - return 0; + if ((*pos)++ < ctx->pos) + return true; if (unlikely(S_ISLNK(table->mode))) - res = proc_sys_link_fill_cache(file, dirent, filldir, head, table); + res = proc_sys_link_fill_cache(file, ctx, head, table); else - res = proc_sys_fill_cache(file, dirent, filldir, head, table); + res = proc_sys_fill_cache(file, ctx, head, table); - if (res == 0) - file->f_pos = *pos; + if (res) + ctx->pos = *pos; return res; } -static int proc_sys_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int proc_sys_readdir(struct file *file, struct dir_context *ctx) { - struct dentry *dentry = filp->f_path.dentry; - struct inode *inode = dentry->d_inode; - struct ctl_table_header *head = grab_header(inode); + struct ctl_table_header *head = grab_header(file_inode(file)); struct ctl_table_header *h = NULL; struct ctl_table *entry; struct ctl_dir *ctl_dir; unsigned long pos; - int ret = -EINVAL; if (IS_ERR(head)) return PTR_ERR(head); ctl_dir = container_of(head, struct ctl_dir, header); - ret = 0; - /* Avoid a switch here: arm builds fail with missing __cmpdi2 */ - if (filp->f_pos == 0) { - if (filldir(dirent, ".", 1, filp->f_pos, - inode->i_ino, DT_DIR) < 0) - goto out; - filp->f_pos++; - } - if (filp->f_pos == 1) { - if (filldir(dirent, "..", 2, filp->f_pos, - parent_ino(dentry), DT_DIR) < 0) - goto out; - filp->f_pos++; - } + if (!dir_emit_dots(file, ctx)) + return 0; + pos = 2; for (first_entry(ctl_dir, &h, &entry); h; next_entry(&h, &entry)) { - ret = scan(h, entry, &pos, filp, dirent, filldir); - if (ret) { + if (!scan(h, entry, &pos, file, 
ctx)) { sysctl_head_finish(h); break; } } - ret = 1; -out: sysctl_head_finish(head); - return ret; + return 0; } static int proc_sys_permission(struct inode *inode, int mask) @@ -769,7 +752,7 @@ static const struct file_operations proc_sys_file_operations = { static const struct file_operations proc_sys_dir_file_operations = { .read = generic_read_dir, - .readdir = proc_sys_readdir, + .iterate = proc_sys_readdir, .llseek = generic_file_llseek, }; @@ -813,15 +796,16 @@ static int sysctl_is_seen(struct ctl_table_header *p) return res; } -static int proc_sys_compare(const struct dentry *parent, - const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, +static int proc_sys_compare(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { struct ctl_table_header *head; + struct inode *inode; + /* Although proc doesn't have negative dentries, rcu-walk means * that inode here can be NULL */ /* AV: can it, indeed? */ + inode = ACCESS_ONCE(dentry->d_inode); if (!inode) return 1; if (name->len != len) diff --git a/fs/proc/root.c b/fs/proc/root.c index 41a6ea93f486..229e366598da 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -202,21 +202,14 @@ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentr return proc_pid_lookup(dir, dentry, flags); } -static int proc_root_readdir(struct file * filp, - void * dirent, filldir_t filldir) +static int proc_root_readdir(struct file *file, struct dir_context *ctx) { - unsigned int nr = filp->f_pos; - int ret; - - if (nr < FIRST_PROCESS_ENTRY) { - int error = proc_readdir(filp, dirent, filldir); - if (error <= 0) - return error; - filp->f_pos = FIRST_PROCESS_ENTRY; + if (ctx->pos < FIRST_PROCESS_ENTRY) { + proc_readdir(file, ctx); + ctx->pos = FIRST_PROCESS_ENTRY; } - ret = proc_pid_readdir(filp, dirent, filldir); - return ret; + return proc_pid_readdir(file, ctx); } /* @@ -226,7 +219,7 @@ static int proc_root_readdir(struct file * filp, */ static const struct file_operations proc_root_operations = { .read = generic_read_dir, - .readdir = proc_root_readdir, + .iterate = proc_root_readdir, .llseek = default_llseek, }; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 3e636d864d56..dbf61f6174f0 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -11,6 +11,7 @@ #include <linux/rmap.h> #include <linux/swap.h> #include <linux/swapops.h> +#include <linux/mmu_notifier.h> #include <asm/elf.h> #include <asm/uaccess.h> @@ -688,10 +689,58 @@ const struct file_operations proc_tid_smaps_operations = { .release = seq_release_private, }; +/* + * We do not want to have constant page-shift bits sitting in + * pagemap entries and are about to reuse them some time soon. + * + * Here's the "migration strategy": + * 1. when the system boots these bits remain what they are, + * but a warning about future change is printed in log; + * 2. once anyone clears soft-dirty bits via clear_refs file, + * these flag is set to denote, that user is aware of the + * new API and those page-shift bits change their meaning. + * The respective warning is printed in dmesg; + * 3. In a couple of releases we will remove all the mentions + * of page-shift in pagemap entries. 
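[Editor's note: the migration this comment describes is observable from userspace once soft-dirty clearing is in use. A rough check, assuming the post-change layout (soft-dirty in bit 55, and "4" being CLEAR_REFS_SOFT_DIRTY as the enum just below defines); error handling trimmed:]

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        static volatile char page[1];
        long psize = sysconf(_SC_PAGESIZE);
        uint64_t ent;
        int fd;

        fd = open("/proc/self/clear_refs", O_WRONLY);
        write(fd, "4", 1);   /* CLEAR_REFS_SOFT_DIRTY: wrprotect + clear */
        close(fd);

        page[0] = 1;         /* the write fault sets soft-dirty again */

        fd = open("/proc/self/pagemap", O_RDONLY);
        pread(fd, &ent, sizeof(ent),
              ((uintptr_t)page / psize) * sizeof(ent));
        close(fd);

        printf("soft-dirty: %d\n", (int)((ent >> 55) & 1));
        return 0;
    }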
+ */ + +static bool soft_dirty_cleared __read_mostly; + +enum clear_refs_types { + CLEAR_REFS_ALL = 1, + CLEAR_REFS_ANON, + CLEAR_REFS_MAPPED, + CLEAR_REFS_SOFT_DIRTY, + CLEAR_REFS_LAST, +}; + +struct clear_refs_private { + struct vm_area_struct *vma; + enum clear_refs_types type; +}; + +static inline void clear_soft_dirty(struct vm_area_struct *vma, + unsigned long addr, pte_t *pte) +{ +#ifdef CONFIG_MEM_SOFT_DIRTY + /* + * The soft-dirty tracker uses #PF-s to catch writes + * to pages, so write-protect the pte as well. See the + * Documentation/vm/soft-dirty.txt for full description + * of how soft-dirty works. + */ + pte_t ptent = *pte; + ptent = pte_wrprotect(ptent); + ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); + set_pte_at(vma->vm_mm, addr, pte, ptent); +#endif +} + static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { - struct vm_area_struct *vma = walk->private; + struct clear_refs_private *cp = walk->private; + struct vm_area_struct *vma = cp->vma; pte_t *pte, ptent; spinlock_t *ptl; struct page *page; @@ -706,6 +755,11 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, if (!pte_present(ptent)) continue; + if (cp->type == CLEAR_REFS_SOFT_DIRTY) { + clear_soft_dirty(vma, addr, pte); + continue; + } + page = vm_normal_page(vma, addr, ptent); if (!page) continue; @@ -719,10 +773,6 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, return 0; } -#define CLEAR_REFS_ALL 1 -#define CLEAR_REFS_ANON 2 -#define CLEAR_REFS_MAPPED 3 - static ssize_t clear_refs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { @@ -730,7 +780,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, char buffer[PROC_NUMBUF]; struct mm_struct *mm; struct vm_area_struct *vma; - int type; + enum clear_refs_types type; + int itype; int rv; memset(buffer, 0, sizeof(buffer)); @@ -738,23 +789,37 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, count = sizeof(buffer) - 1; if (copy_from_user(buffer, buf, count)) return -EFAULT; - rv = kstrtoint(strstrip(buffer), 10, &type); + rv = kstrtoint(strstrip(buffer), 10, &itype); if (rv < 0) return rv; - if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED) + type = (enum clear_refs_types)itype; + if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST) return -EINVAL; + + if (type == CLEAR_REFS_SOFT_DIRTY) { + soft_dirty_cleared = true; + pr_warn_once("The pagemap bits 55-60 has changed their meaning! 
" + "See the linux/Documentation/vm/pagemap.txt for details.\n"); + } + task = get_proc_task(file_inode(file)); if (!task) return -ESRCH; mm = get_task_mm(task); if (mm) { + struct clear_refs_private cp = { + .type = type, + }; struct mm_walk clear_refs_walk = { .pmd_entry = clear_refs_pte_range, .mm = mm, + .private = &cp, }; down_read(&mm->mmap_sem); + if (type == CLEAR_REFS_SOFT_DIRTY) + mmu_notifier_invalidate_range_start(mm, 0, -1); for (vma = mm->mmap; vma; vma = vma->vm_next) { - clear_refs_walk.private = vma; + cp.vma = vma; if (is_vm_hugetlb_page(vma)) continue; /* @@ -773,6 +838,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, walk_page_range(vma->vm_start, vma->vm_end, &clear_refs_walk); } + if (type == CLEAR_REFS_SOFT_DIRTY) + mmu_notifier_invalidate_range_end(mm, 0, -1); flush_tlb_mm(mm); up_read(&mm->mmap_sem); mmput(mm); @@ -794,6 +861,7 @@ typedef struct { struct pagemapread { int pos, len; pagemap_entry_t *buffer; + bool v2; }; #define PAGEMAP_WALK_SIZE (PMD_SIZE) @@ -807,14 +875,17 @@ struct pagemapread { #define PM_PSHIFT_BITS 6 #define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS) #define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET) -#define PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK) +#define __PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK) #define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1) #define PM_PFRAME(x) ((x) & PM_PFRAME_MASK) +/* in "new" pagemap pshift bits are occupied with more status bits */ +#define PM_STATUS2(v2, x) (__PM_PSHIFT(v2 ? x : PAGE_SHIFT)) +#define __PM_SOFT_DIRTY (1LL) #define PM_PRESENT PM_STATUS(4LL) #define PM_SWAP PM_STATUS(2LL) #define PM_FILE PM_STATUS(1LL) -#define PM_NOT_PRESENT PM_PSHIFT(PAGE_SHIFT) +#define PM_NOT_PRESENT(v2) PM_STATUS2(v2, 0) #define PM_END_OF_BUFFER 1 static inline pagemap_entry_t make_pme(u64 val) @@ -837,7 +908,7 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end, struct pagemapread *pm = walk->private; unsigned long addr; int err = 0; - pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); + pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); for (addr = start; addr < end; addr += PAGE_SIZE) { err = add_to_pagemap(addr, &pme, pm); @@ -847,11 +918,12 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end, return err; } -static void pte_to_pagemap_entry(pagemap_entry_t *pme, +static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, struct vm_area_struct *vma, unsigned long addr, pte_t pte) { u64 frame, flags; struct page *page = NULL; + int flags2 = 0; if (pte_present(pte)) { frame = pte_pfn(pte); @@ -866,19 +938,21 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, if (is_migration_entry(entry)) page = migration_entry_to_page(entry); } else { - *pme = make_pme(PM_NOT_PRESENT); + *pme = make_pme(PM_NOT_PRESENT(pm->v2)); return; } if (page && !PageAnon(page)) flags |= PM_FILE; + if (pte_soft_dirty(pte)) + flags2 |= __PM_SOFT_DIRTY; - *pme = make_pme(PM_PFRAME(frame) | PM_PSHIFT(PAGE_SHIFT) | flags); + *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE -static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, - pmd_t pmd, int offset) +static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, + pmd_t pmd, int offset, int pmd_flags2) { /* * Currently pmd for thp is always present because thp can not be @@ -887,13 +961,13 @@ static void thp_pmd_to_pagemap_entry(pagemap_entry_t 
*pme, */ if (pmd_present(pmd)) *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset) - | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); + | PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT); else - *pme = make_pme(PM_NOT_PRESENT); + *pme = make_pme(PM_NOT_PRESENT(pm->v2)); } #else -static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, - pmd_t pmd, int offset) +static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, + pmd_t pmd, int offset, int pmd_flags2) { } #endif @@ -905,17 +979,20 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct pagemapread *pm = walk->private; pte_t *pte; int err = 0; - pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); + pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); /* find the first VMA at or above 'addr' */ vma = find_vma(walk->mm, addr); if (vma && pmd_trans_huge_lock(pmd, vma) == 1) { + int pmd_flags2; + + pmd_flags2 = (pmd_soft_dirty(*pmd) ? __PM_SOFT_DIRTY : 0); for (; addr != end; addr += PAGE_SIZE) { unsigned long offset; offset = (addr & ~PAGEMAP_WALK_MASK) >> PAGE_SHIFT; - thp_pmd_to_pagemap_entry(&pme, *pmd, offset); + thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2); err = add_to_pagemap(addr, &pme, pm); if (err) break; @@ -932,7 +1009,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, * and need a new, higher one */ if (vma && (addr >= vma->vm_end)) { vma = find_vma(walk->mm, addr); - pme = make_pme(PM_NOT_PRESENT); + pme = make_pme(PM_NOT_PRESENT(pm->v2)); } /* check that 'vma' actually covers this address, @@ -940,7 +1017,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, if (vma && (vma->vm_start <= addr) && !is_vm_hugetlb_page(vma)) { pte = pte_offset_map(pmd, addr); - pte_to_pagemap_entry(&pme, vma, addr, *pte); + pte_to_pagemap_entry(&pme, pm, vma, addr, *pte); /* unmap before userspace copy */ pte_unmap(pte); } @@ -955,14 +1032,14 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, } #ifdef CONFIG_HUGETLB_PAGE -static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, +static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, pte_t pte, int offset) { if (pte_present(pte)) *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) - | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); + | PM_STATUS2(pm->v2, 0) | PM_PRESENT); else - *pme = make_pme(PM_NOT_PRESENT); + *pme = make_pme(PM_NOT_PRESENT(pm->v2)); } /* This function walks within one hugetlb entry in the single call */ @@ -976,7 +1053,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, for (; addr != end; addr += PAGE_SIZE) { int offset = (addr & ~hmask) >> PAGE_SHIFT; - huge_pte_to_pagemap_entry(&pme, *pte, offset); + huge_pte_to_pagemap_entry(&pme, pm, *pte, offset); err = add_to_pagemap(addr, &pme, pm); if (err) return err; @@ -1038,6 +1115,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, if (!count) goto out_task; + pm.v2 = soft_dirty_cleared; pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); pm.buffer = kmalloc(pm.len, GFP_TEMPORARY); ret = -ENOMEM; @@ -1110,9 +1188,18 @@ out: return ret; } +static int pagemap_open(struct inode *inode, struct file *file) +{ + pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about " + "to stop being page-shift some time soon. 
See the " + "linux/Documentation/vm/pagemap.txt for details.\n"); + return 0; +} + const struct file_operations proc_pagemap_operations = { .llseek = mem_lseek, /* borrow this */ .read = pagemap_read, + .open = pagemap_open, }; #endif /* CONFIG_PROC_PAGE_MONITOR */ diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c index 9610ac772d7e..061894625903 100644 --- a/fs/proc/uptime.c +++ b/fs/proc/uptime.c @@ -20,8 +20,7 @@ static int uptime_proc_show(struct seq_file *m, void *v) for_each_possible_cpu(i) idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE]; - do_posix_clock_monotonic_gettime(&uptime); - monotonic_to_bootbased(&uptime); + get_monotonic_boottime(&uptime); nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC; idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); idle.tv_nsec = rem; diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 17f7e080d7ff..28503172f2e4 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -20,6 +20,7 @@ #include <linux/init.h> #include <linux/crash_dump.h> #include <linux/list.h> +#include <linux/vmalloc.h> #include <asm/uaccess.h> #include <asm/io.h> #include "internal.h" @@ -32,6 +33,10 @@ static LIST_HEAD(vmcore_list); /* Stores the pointer to the buffer containing kernel elf core headers. */ static char *elfcorebuf; static size_t elfcorebuf_sz; +static size_t elfcorebuf_sz_orig; + +static char *elfnotes_buf; +static size_t elfnotes_sz; /* Total size of vmcore file. */ static u64 vmcore_size; @@ -118,27 +123,6 @@ static ssize_t read_from_oldmem(char *buf, size_t count, return read; } -/* Maps vmcore file offset to respective physical address in memroy. */ -static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list, - struct vmcore **m_ptr) -{ - struct vmcore *m; - u64 paddr; - - list_for_each_entry(m, vc_list, list) { - u64 start, end; - start = m->offset; - end = m->offset + m->size - 1; - if (offset >= start && offset <= end) { - paddr = m->paddr + offset - start; - *m_ptr = m; - return paddr; - } - } - *m_ptr = NULL; - return 0; -} - /* Read from the ELF header and then the crash dump. On error, negative value is * returned otherwise number of bytes read are returned. */ @@ -147,8 +131,8 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer, { ssize_t acc = 0, tmp; size_t tsz; - u64 start, nr_bytes; - struct vmcore *curr_m = NULL; + u64 start; + struct vmcore *m = NULL; if (buflen == 0 || *fpos >= vmcore_size) return 0; @@ -159,9 +143,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer, /* Read ELF core header */ if (*fpos < elfcorebuf_sz) { - tsz = elfcorebuf_sz - *fpos; - if (buflen < tsz) - tsz = buflen; + tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen); if (copy_to_user(buffer, elfcorebuf + *fpos, tsz)) return -EFAULT; buflen -= tsz; @@ -174,39 +156,161 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer, return acc; } - start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m); - if (!curr_m) - return -EINVAL; - - while (buflen) { - tsz = min_t(size_t, buflen, PAGE_SIZE - (start & ~PAGE_MASK)); + /* Read Elf note segment */ + if (*fpos < elfcorebuf_sz + elfnotes_sz) { + void *kaddr; - /* Calculate left bytes in current memory segment. 
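[Editor's note: the rewritten read path drops map_offset_to_paddr() in favour of treating the file as three consecutive regions — the ELF header buffer, the merged note buffer, then the page-rounded memory chunks at their recorded offsets. A standalone model of that resolution, with simplified types and no clamping of the read length:]

    /* Returns where a given vmcore file offset should be read from. */
    struct chunk { unsigned long long off, size, paddr; };

    static long long resolve(unsigned long long fpos,
                             unsigned long long hdr_sz,
                             unsigned long long notes_sz,
                             const struct chunk *c, int nr)
    {
        int i;

        if (fpos < hdr_sz)
            return fpos;                  /* elfcorebuf */
        if (fpos < hdr_sz + notes_sz)
            return fpos - hdr_sz;         /* elfnotes_buf */
        for (i = 0; i < nr; i++)
            if (fpos < c[i].off + c[i].size)
                return c[i].paddr + fpos - c[i].off;  /* old memory */
        return -1;                        /* past EOF */
    }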
*/ - nr_bytes = (curr_m->size - (start - curr_m->paddr)); - if (tsz > nr_bytes) - tsz = nr_bytes; - - tmp = read_from_oldmem(buffer, tsz, &start, 1); - if (tmp < 0) - return tmp; + tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen); + kaddr = elfnotes_buf + *fpos - elfcorebuf_sz; + if (copy_to_user(buffer, kaddr, tsz)) + return -EFAULT; buflen -= tsz; *fpos += tsz; buffer += tsz; acc += tsz; - if (start >= (curr_m->paddr + curr_m->size)) { - if (curr_m->list.next == &vmcore_list) - return acc; /*EOF*/ - curr_m = list_entry(curr_m->list.next, - struct vmcore, list); - start = curr_m->paddr; + + /* leave now if filled buffer already */ + if (buflen == 0) + return acc; + } + + list_for_each_entry(m, &vmcore_list, list) { + if (*fpos < m->offset + m->size) { + tsz = min_t(size_t, m->offset + m->size - *fpos, buflen); + start = m->paddr + *fpos - m->offset; + tmp = read_from_oldmem(buffer, tsz, &start, 1); + if (tmp < 0) + return tmp; + buflen -= tsz; + *fpos += tsz; + buffer += tsz; + acc += tsz; + + /* leave now if filled buffer already */ + if (buflen == 0) + return acc; } } + return acc; } +/** + * alloc_elfnotes_buf - allocate buffer for ELF note segment in + * vmalloc memory + * + * @notes_sz: size of buffer + * + * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap + * the buffer to user-space by means of remap_vmalloc_range(). + * + * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is + * disabled and there's no need to allow users to mmap the buffer. + */ +static inline char *alloc_elfnotes_buf(size_t notes_sz) +{ +#ifdef CONFIG_MMU + return vmalloc_user(notes_sz); +#else + return vzalloc(notes_sz); +#endif +} + +/* + * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is + * essential for mmap_vmcore() in order to map physically + * non-contiguous objects (ELF header, ELF note segment and memory + * regions in the 1st kernel pointed to by PT_LOAD entries) into + * virtually contiguous user-space in ELF layout. 
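[Editor's note: from the consumer side, the point of this layout work is that /proc/vmcore can now be mapped instead of copied. An illustrative user of the mmap handler installed below — untested here, and the 1 MiB window is arbitrary:]

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/proc/vmcore", O_RDONLY);
        void *p;

        if (fd < 0)
            return 1;
        p = mmap(NULL, 1 << 20, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED)
            return 1;
        printf("ELF magic: %.3s\n", (char *)p + 1);  /* "ELF" after 0x7f */
        munmap(p, 1 << 20);
        close(fd);
        return 0;
    }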
+ */ +#ifdef CONFIG_MMU +static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) +{ + size_t size = vma->vm_end - vma->vm_start; + u64 start, end, len, tsz; + struct vmcore *m; + + start = (u64)vma->vm_pgoff << PAGE_SHIFT; + end = start + size; + + if (size > vmcore_size || end > vmcore_size) + return -EINVAL; + + if (vma->vm_flags & (VM_WRITE | VM_EXEC)) + return -EPERM; + + vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC); + vma->vm_flags |= VM_MIXEDMAP; + + len = 0; + + if (start < elfcorebuf_sz) { + u64 pfn; + + tsz = min(elfcorebuf_sz - (size_t)start, size); + pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT; + if (remap_pfn_range(vma, vma->vm_start, pfn, tsz, + vma->vm_page_prot)) + return -EAGAIN; + size -= tsz; + start += tsz; + len += tsz; + + if (size == 0) + return 0; + } + + if (start < elfcorebuf_sz + elfnotes_sz) { + void *kaddr; + + tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size); + kaddr = elfnotes_buf + start - elfcorebuf_sz; + if (remap_vmalloc_range_partial(vma, vma->vm_start + len, + kaddr, tsz)) + goto fail; + size -= tsz; + start += tsz; + len += tsz; + + if (size == 0) + return 0; + } + + list_for_each_entry(m, &vmcore_list, list) { + if (start < m->offset + m->size) { + u64 paddr = 0; + + tsz = min_t(size_t, m->offset + m->size - start, size); + paddr = m->paddr + start - m->offset; + if (remap_pfn_range(vma, vma->vm_start + len, + paddr >> PAGE_SHIFT, tsz, + vma->vm_page_prot)) + goto fail; + size -= tsz; + start += tsz; + len += tsz; + + if (size == 0) + return 0; + } + } + + return 0; +fail: + do_munmap(vma->vm_mm, vma->vm_start, len); + return -EAGAIN; +} +#else +static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) +{ + return -ENOSYS; +} +#endif + static const struct file_operations proc_vmcore_operations = { .read = read_vmcore, .llseek = default_llseek, + .mmap = mmap_vmcore, }; static struct vmcore* __init get_new_element(void) @@ -214,61 +318,40 @@ static struct vmcore* __init get_new_element(void) return kzalloc(sizeof(struct vmcore), GFP_KERNEL); } -static u64 __init get_vmcore_size_elf64(char *elfptr) +static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz, + struct list_head *vc_list) { - int i; - u64 size; - Elf64_Ehdr *ehdr_ptr; - Elf64_Phdr *phdr_ptr; - - ehdr_ptr = (Elf64_Ehdr *)elfptr; - phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); - size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr)); - for (i = 0; i < ehdr_ptr->e_phnum; i++) { - size += phdr_ptr->p_memsz; - phdr_ptr++; - } - return size; -} - -static u64 __init get_vmcore_size_elf32(char *elfptr) -{ - int i; u64 size; - Elf32_Ehdr *ehdr_ptr; - Elf32_Phdr *phdr_ptr; + struct vmcore *m; - ehdr_ptr = (Elf32_Ehdr *)elfptr; - phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); - size = sizeof(Elf32_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr)); - for (i = 0; i < ehdr_ptr->e_phnum; i++) { - size += phdr_ptr->p_memsz; - phdr_ptr++; + size = elfsz + elfnotesegsz; + list_for_each_entry(m, vc_list, list) { + size += m->size; } return size; } -/* Merges all the PT_NOTE headers into one. */ -static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz, - struct list_head *vc_list) +/** + * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry + * + * @ehdr_ptr: ELF header + * + * This function updates p_memsz member of each PT_NOTE entry in the + * program header table pointed to by @ehdr_ptr to real size of ELF + * note segment. 
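[Editor's note: what the loop in this function walks — an ELF note segment is a sequence of Elf64_Nhdr records, each followed by its name and descriptor padded to 4 bytes. A userspace-typed sketch of the size computation, using explicit masking where the kernel code uses roundup():]

    #include <elf.h>

    static unsigned long long notes_real_size(const Elf64_Nhdr *nhdr,
                                              unsigned long long max_sz)
    {
        unsigned long long real_sz = 0, sz;

        while (real_sz < max_sz) {
            if (nhdr->n_namesz == 0)      /* terminating entry */
                break;
            sz = sizeof(Elf64_Nhdr) +
                 ((nhdr->n_namesz + 3U) & ~3U) +
                 ((nhdr->n_descsz + 3U) & ~3U);
            real_sz += sz;
            nhdr = (const Elf64_Nhdr *)((const char *)nhdr + sz);
        }
        return real_sz;
    }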
+ */ +static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr) { - int i, nr_ptnote=0, rc=0; - char *tmp; - Elf64_Ehdr *ehdr_ptr; - Elf64_Phdr phdr, *phdr_ptr; + int i, rc=0; + Elf64_Phdr *phdr_ptr; Elf64_Nhdr *nhdr_ptr; - u64 phdr_sz = 0, note_off; - ehdr_ptr = (Elf64_Ehdr *)elfptr; - phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); + phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1); for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { - int j; void *notes_section; - struct vmcore *new; u64 offset, max_sz, sz, real_sz = 0; if (phdr_ptr->p_type != PT_NOTE) continue; - nr_ptnote++; max_sz = phdr_ptr->p_memsz; offset = phdr_ptr->p_offset; notes_section = kmalloc(max_sz, GFP_KERNEL); @@ -280,7 +363,7 @@ static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz, return rc; } nhdr_ptr = notes_section; - for (j = 0; j < max_sz; j += sz) { + while (real_sz < max_sz) { if (nhdr_ptr->n_namesz == 0) break; sz = sizeof(Elf64_Nhdr) + @@ -289,26 +372,122 @@ static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz, real_sz += sz; nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz); } - - /* Add this contiguous chunk of notes section to vmcore list.*/ - new = get_new_element(); - if (!new) { - kfree(notes_section); - return -ENOMEM; - } - new->paddr = phdr_ptr->p_offset; - new->size = real_sz; - list_add_tail(&new->list, vc_list); - phdr_sz += real_sz; kfree(notes_section); + phdr_ptr->p_memsz = real_sz; + } + + return 0; +} + +/** + * get_note_number_and_size_elf64 - get the number of PT_NOTE program + * headers and sum of real size of their ELF note segment headers and + * data. + * + * @ehdr_ptr: ELF header + * @nr_ptnote: buffer for the number of PT_NOTE program headers + * @sz_ptnote: buffer for size of unique PT_NOTE program header + * + * This function is used to merge multiple PT_NOTE program headers + * into a unique single one. The resulting unique entry will have + * @sz_ptnote in its phdr->p_mem. + * + * It is assumed that program headers with PT_NOTE type pointed to by + * @ehdr_ptr has already been updated by update_note_header_size_elf64 + * and each of PT_NOTE program headers has actual ELF note segment + * size in its p_memsz member. + */ +static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr, + int *nr_ptnote, u64 *sz_ptnote) +{ + int i; + Elf64_Phdr *phdr_ptr; + + *nr_ptnote = *sz_ptnote = 0; + + phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1); + for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { + if (phdr_ptr->p_type != PT_NOTE) + continue; + *nr_ptnote += 1; + *sz_ptnote += phdr_ptr->p_memsz; + } + + return 0; +} + +/** + * copy_notes_elf64 - copy ELF note segments in a given buffer + * + * @ehdr_ptr: ELF header + * @notes_buf: buffer into which ELF note segments are copied + * + * This function is used to copy ELF note segment in the 1st kernel + * into the buffer @notes_buf in the 2nd kernel. It is assumed that + * size of the buffer @notes_buf is equal to or larger than sum of the + * real ELF note segment headers and data. + * + * It is assumed that program headers with PT_NOTE type pointed to by + * @ehdr_ptr has already been updated by update_note_header_size_elf64 + * and each of PT_NOTE program headers has actual ELF note segment + * size in its p_memsz member. 
+ */ +static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf) +{ + int i, rc=0; + Elf64_Phdr *phdr_ptr; + + phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1); + + for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { + u64 offset; + if (phdr_ptr->p_type != PT_NOTE) + continue; + offset = phdr_ptr->p_offset; + rc = read_from_oldmem(notes_buf, phdr_ptr->p_memsz, &offset, 0); + if (rc < 0) + return rc; + notes_buf += phdr_ptr->p_memsz; } + return 0; +} + +/* Merges all the PT_NOTE headers into one. */ +static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz, + char **notes_buf, size_t *notes_sz) +{ + int i, nr_ptnote=0, rc=0; + char *tmp; + Elf64_Ehdr *ehdr_ptr; + Elf64_Phdr phdr; + u64 phdr_sz = 0, note_off; + + ehdr_ptr = (Elf64_Ehdr *)elfptr; + + rc = update_note_header_size_elf64(ehdr_ptr); + if (rc < 0) + return rc; + + rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz); + if (rc < 0) + return rc; + + *notes_sz = roundup(phdr_sz, PAGE_SIZE); + *notes_buf = alloc_elfnotes_buf(*notes_sz); + if (!*notes_buf) + return -ENOMEM; + + rc = copy_notes_elf64(ehdr_ptr, *notes_buf); + if (rc < 0) + return rc; + /* Prepare merged PT_NOTE program header. */ phdr.p_type = PT_NOTE; phdr.p_flags = 0; note_off = sizeof(Elf64_Ehdr) + (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr); - phdr.p_offset = note_off; + phdr.p_offset = roundup(note_off, PAGE_SIZE); phdr.p_vaddr = phdr.p_paddr = 0; phdr.p_filesz = phdr.p_memsz = phdr_sz; phdr.p_align = 0; @@ -322,6 +501,8 @@ static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz, i = (nr_ptnote - 1) * sizeof(Elf64_Phdr); *elfsz = *elfsz - i; memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr))); + memset(elfptr + *elfsz, 0, i); + *elfsz = roundup(*elfsz, PAGE_SIZE); /* Modify e_phnum to reflect merged headers. */ ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1; @@ -329,27 +510,27 @@ static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz, return 0; } -/* Merges all the PT_NOTE headers into one. */ -static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz, - struct list_head *vc_list) +/** + * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry + * + * @ehdr_ptr: ELF header + * + * This function updates p_memsz member of each PT_NOTE entry in the + * program header table pointed to by @ehdr_ptr to real size of ELF + * note segment. 
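[Editor's note: a worked example of the merge arithmetic in merge_note_headers_elf64() above, with hypothetical numbers — six program headers of which three are PT_NOTE leave 6 - 3 + 1 = 4 headers, and the note data starts at the first page boundary past the header block so each region stays individually mappable:]

    #include <elf.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long e_phnum = 6, nr_ptnote = 3;  /* hypothetical */
        unsigned long page = 4096;
        unsigned long note_off = sizeof(Elf64_Ehdr) +
                (e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);

        printf("headers end at %lu, note segment starts at %lu\n",
               note_off, (note_off + page - 1) & ~(page - 1));
        return 0;
    }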
+ */ +static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr) { - int i, nr_ptnote=0, rc=0; - char *tmp; - Elf32_Ehdr *ehdr_ptr; - Elf32_Phdr phdr, *phdr_ptr; + int i, rc=0; + Elf32_Phdr *phdr_ptr; Elf32_Nhdr *nhdr_ptr; - u64 phdr_sz = 0, note_off; - ehdr_ptr = (Elf32_Ehdr *)elfptr; - phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); + phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1); for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { - int j; void *notes_section; - struct vmcore *new; u64 offset, max_sz, sz, real_sz = 0; if (phdr_ptr->p_type != PT_NOTE) continue; - nr_ptnote++; max_sz = phdr_ptr->p_memsz; offset = phdr_ptr->p_offset; notes_section = kmalloc(max_sz, GFP_KERNEL); @@ -361,7 +542,7 @@ static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz, return rc; } nhdr_ptr = notes_section; - for (j = 0; j < max_sz; j += sz) { + while (real_sz < max_sz) { if (nhdr_ptr->n_namesz == 0) break; sz = sizeof(Elf32_Nhdr) + @@ -370,26 +551,122 @@ static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz, real_sz += sz; nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz); } - - /* Add this contiguous chunk of notes section to vmcore list.*/ - new = get_new_element(); - if (!new) { - kfree(notes_section); - return -ENOMEM; - } - new->paddr = phdr_ptr->p_offset; - new->size = real_sz; - list_add_tail(&new->list, vc_list); - phdr_sz += real_sz; kfree(notes_section); + phdr_ptr->p_memsz = real_sz; + } + + return 0; +} + +/** + * get_note_number_and_size_elf32 - get the number of PT_NOTE program + * headers and sum of real size of their ELF note segment headers and + * data. + * + * @ehdr_ptr: ELF header + * @nr_ptnote: buffer for the number of PT_NOTE program headers + * @sz_ptnote: buffer for size of unique PT_NOTE program header + * + * This function is used to merge multiple PT_NOTE program headers + * into a unique single one. The resulting unique entry will have + * @sz_ptnote in its phdr->p_mem. + * + * It is assumed that program headers with PT_NOTE type pointed to by + * @ehdr_ptr has already been updated by update_note_header_size_elf32 + * and each of PT_NOTE program headers has actual ELF note segment + * size in its p_memsz member. + */ +static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr, + int *nr_ptnote, u64 *sz_ptnote) +{ + int i; + Elf32_Phdr *phdr_ptr; + + *nr_ptnote = *sz_ptnote = 0; + + phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1); + for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { + if (phdr_ptr->p_type != PT_NOTE) + continue; + *nr_ptnote += 1; + *sz_ptnote += phdr_ptr->p_memsz; + } + + return 0; +} + +/** + * copy_notes_elf32 - copy ELF note segments in a given buffer + * + * @ehdr_ptr: ELF header + * @notes_buf: buffer into which ELF note segments are copied + * + * This function is used to copy ELF note segment in the 1st kernel + * into the buffer @notes_buf in the 2nd kernel. It is assumed that + * size of the buffer @notes_buf is equal to or larger than sum of the + * real ELF note segment headers and data. + * + * It is assumed that program headers with PT_NOTE type pointed to by + * @ehdr_ptr has already been updated by update_note_header_size_elf32 + * and each of PT_NOTE program headers has actual ELF note segment + * size in its p_memsz member. 
+ */ +static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf) +{ + int i, rc=0; + Elf32_Phdr *phdr_ptr; + + phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1); + + for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { + u64 offset; + if (phdr_ptr->p_type != PT_NOTE) + continue; + offset = phdr_ptr->p_offset; + rc = read_from_oldmem(notes_buf, phdr_ptr->p_memsz, &offset, 0); + if (rc < 0) + return rc; + notes_buf += phdr_ptr->p_memsz; } + return 0; +} + +/* Merges all the PT_NOTE headers into one. */ +static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz, + char **notes_buf, size_t *notes_sz) +{ + int i, nr_ptnote=0, rc=0; + char *tmp; + Elf32_Ehdr *ehdr_ptr; + Elf32_Phdr phdr; + u64 phdr_sz = 0, note_off; + + ehdr_ptr = (Elf32_Ehdr *)elfptr; + + rc = update_note_header_size_elf32(ehdr_ptr); + if (rc < 0) + return rc; + + rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz); + if (rc < 0) + return rc; + + *notes_sz = roundup(phdr_sz, PAGE_SIZE); + *notes_buf = alloc_elfnotes_buf(*notes_sz); + if (!*notes_buf) + return -ENOMEM; + + rc = copy_notes_elf32(ehdr_ptr, *notes_buf); + if (rc < 0) + return rc; + /* Prepare merged PT_NOTE program header. */ phdr.p_type = PT_NOTE; phdr.p_flags = 0; note_off = sizeof(Elf32_Ehdr) + (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr); - phdr.p_offset = note_off; + phdr.p_offset = roundup(note_off, PAGE_SIZE); phdr.p_vaddr = phdr.p_paddr = 0; phdr.p_filesz = phdr.p_memsz = phdr_sz; phdr.p_align = 0; @@ -403,6 +680,8 @@ static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz, i = (nr_ptnote - 1) * sizeof(Elf32_Phdr); *elfsz = *elfsz - i; memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr))); + memset(elfptr + *elfsz, 0, i); + *elfsz = roundup(*elfsz, PAGE_SIZE); /* Modify e_phnum to reflect merged headers. */ ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1; @@ -414,6 +693,7 @@ static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz, * the new offset fields of exported program headers. */ static int __init process_ptload_program_headers_elf64(char *elfptr, size_t elfsz, + size_t elfnotes_sz, struct list_head *vc_list) { int i; @@ -425,32 +705,38 @@ static int __init process_ptload_program_headers_elf64(char *elfptr, ehdr_ptr = (Elf64_Ehdr *)elfptr; phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */ - /* First program header is PT_NOTE header. */ - vmcore_off = sizeof(Elf64_Ehdr) + - (ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) + - phdr_ptr->p_memsz; /* Note sections */ + /* Skip Elf header, program headers and Elf note segment. */ + vmcore_off = elfsz + elfnotes_sz; for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { + u64 paddr, start, end, size; + if (phdr_ptr->p_type != PT_LOAD) continue; + paddr = phdr_ptr->p_offset; + start = rounddown(paddr, PAGE_SIZE); + end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE); + size = end - start; + /* Add this contiguous chunk of memory to vmcore list.*/ new = get_new_element(); if (!new) return -ENOMEM; - new->paddr = phdr_ptr->p_offset; - new->size = phdr_ptr->p_memsz; + new->paddr = start; + new->size = size; list_add_tail(&new->list, vc_list); /* Update the program header offset. 
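[Editor's note: the PT_LOAD rewrite in these hunks expands every chunk to page boundaries so mmap_vmcore() can remap whole pages, while p_offset keeps the sub-page displacement so file offsets still land on the right byte. The arithmetic in isolation; EX_PAGE stands in for PAGE_SIZE:]

    #define EX_PAGE 4096ULL    /* stand-in for PAGE_SIZE */

    struct seg { unsigned long long start, size, p_offset; };

    /* Places one chunk; returns the file offset where the next begins. */
    static unsigned long long place(struct seg *s,
                                    unsigned long long paddr,
                                    unsigned long long memsz,
                                    unsigned long long vmcore_off)
    {
        s->start    = paddr & ~(EX_PAGE - 1);              /* rounddown */
        s->size     = ((paddr + memsz + EX_PAGE - 1) &
                       ~(EX_PAGE - 1)) - s->start;         /* roundup   */
        s->p_offset = vmcore_off + (paddr - s->start);
        return vmcore_off + s->size;
    }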
*/ - phdr_ptr->p_offset = vmcore_off; - vmcore_off = vmcore_off + phdr_ptr->p_memsz; + phdr_ptr->p_offset = vmcore_off + (paddr - start); + vmcore_off = vmcore_off + size; } return 0; } static int __init process_ptload_program_headers_elf32(char *elfptr, size_t elfsz, + size_t elfnotes_sz, struct list_head *vc_list) { int i; @@ -462,43 +748,44 @@ static int __init process_ptload_program_headers_elf32(char *elfptr, ehdr_ptr = (Elf32_Ehdr *)elfptr; phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */ - /* First program header is PT_NOTE header. */ - vmcore_off = sizeof(Elf32_Ehdr) + - (ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr) + - phdr_ptr->p_memsz; /* Note sections */ + /* Skip Elf header, program headers and Elf note segment. */ + vmcore_off = elfsz + elfnotes_sz; for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { + u64 paddr, start, end, size; + if (phdr_ptr->p_type != PT_LOAD) continue; + paddr = phdr_ptr->p_offset; + start = rounddown(paddr, PAGE_SIZE); + end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE); + size = end - start; + /* Add this contiguous chunk of memory to vmcore list.*/ new = get_new_element(); if (!new) return -ENOMEM; - new->paddr = phdr_ptr->p_offset; - new->size = phdr_ptr->p_memsz; + new->paddr = start; + new->size = size; list_add_tail(&new->list, vc_list); /* Update the program header offset */ - phdr_ptr->p_offset = vmcore_off; - vmcore_off = vmcore_off + phdr_ptr->p_memsz; + phdr_ptr->p_offset = vmcore_off + (paddr - start); + vmcore_off = vmcore_off + size; } return 0; } /* Sets offset fields of vmcore elements. */ -static void __init set_vmcore_list_offsets_elf64(char *elfptr, - struct list_head *vc_list) +static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz, + struct list_head *vc_list) { loff_t vmcore_off; - Elf64_Ehdr *ehdr_ptr; struct vmcore *m; - ehdr_ptr = (Elf64_Ehdr *)elfptr; - - /* Skip Elf header and program headers. */ - vmcore_off = sizeof(Elf64_Ehdr) + - (ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr); + /* Skip Elf header, program headers and Elf note segment. */ + vmcore_off = elfsz + elfnotes_sz; list_for_each_entry(m, vc_list, list) { m->offset = vmcore_off; @@ -506,24 +793,12 @@ static void __init set_vmcore_list_offsets_elf64(char *elfptr, } } -/* Sets offset fields of vmcore elements. */ -static void __init set_vmcore_list_offsets_elf32(char *elfptr, - struct list_head *vc_list) +static void free_elfcorebuf(void) { - loff_t vmcore_off; - Elf32_Ehdr *ehdr_ptr; - struct vmcore *m; - - ehdr_ptr = (Elf32_Ehdr *)elfptr; - - /* Skip Elf header and program headers. */ - vmcore_off = sizeof(Elf32_Ehdr) + - (ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr); - - list_for_each_entry(m, vc_list, list) { - m->offset = vmcore_off; - vmcore_off += m->size; - } + free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig)); + elfcorebuf = NULL; + vfree(elfnotes_buf); + elfnotes_buf = NULL; } static int __init parse_crash_elf64_headers(void) @@ -554,31 +829,32 @@ static int __init parse_crash_elf64_headers(void) } /* Read in all elf headers. 
*/ - elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr); - elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL); + elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) + + ehdr.e_phnum * sizeof(Elf64_Phdr); + elfcorebuf_sz = elfcorebuf_sz_orig; + elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(elfcorebuf_sz_orig)); if (!elfcorebuf) return -ENOMEM; addr = elfcorehdr_addr; - rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0); - if (rc < 0) { - kfree(elfcorebuf); - return rc; - } + rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0); + if (rc < 0) + goto fail; /* Merge all PT_NOTE headers into one. */ - rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list); - if (rc) { - kfree(elfcorebuf); - return rc; - } + rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, + &elfnotes_buf, &elfnotes_sz); + if (rc) + goto fail; rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz, - &vmcore_list); - if (rc) { - kfree(elfcorebuf); - return rc; - } - set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list); + elfnotes_sz, &vmcore_list); + if (rc) + goto fail; + set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list); return 0; +fail: + free_elfcorebuf(); + return rc; } static int __init parse_crash_elf32_headers(void) @@ -609,31 +885,31 @@ static int __init parse_crash_elf32_headers(void) } /* Read in all elf headers. */ - elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr); - elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL); + elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr); + elfcorebuf_sz = elfcorebuf_sz_orig; + elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(elfcorebuf_sz_orig)); if (!elfcorebuf) return -ENOMEM; addr = elfcorehdr_addr; - rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0); - if (rc < 0) { - kfree(elfcorebuf); - return rc; - } + rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0); + if (rc < 0) + goto fail; /* Merge all PT_NOTE headers into one. */ - rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list); - if (rc) { - kfree(elfcorebuf); - return rc; - } + rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, + &elfnotes_buf, &elfnotes_sz); + if (rc) + goto fail; rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz, - &vmcore_list); - if (rc) { - kfree(elfcorebuf); - return rc; - } - set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list); + elfnotes_sz, &vmcore_list); + if (rc) + goto fail; + set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list); return 0; +fail: + free_elfcorebuf(); + return rc; } static int __init parse_crash_elf_headers(void) @@ -655,20 +931,19 @@ static int __init parse_crash_elf_headers(void) rc = parse_crash_elf64_headers(); if (rc) return rc; - - /* Determine vmcore size. */ - vmcore_size = get_vmcore_size_elf64(elfcorebuf); } else if (e_ident[EI_CLASS] == ELFCLASS32) { rc = parse_crash_elf32_headers(); if (rc) return rc; - - /* Determine vmcore size. */ - vmcore_size = get_vmcore_size_elf32(elfcorebuf); } else { pr_warn("Warning: Core image elf header is not sane\n"); return -EINVAL; } + + /* Determine vmcore size. 
*/ + vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz, + &vmcore_list); + return 0; } @@ -711,7 +986,6 @@ void vmcore_cleanup(void) list_del(&m->list); kfree(m); } - kfree(elfcorebuf); - elfcorebuf = NULL; + free_elfcorebuf(); } EXPORT_SYMBOL_GPL(vmcore_cleanup); diff --git a/fs/pstore/ftrace.c b/fs/pstore/ftrace.c index 43b12807a51d..76a4eeb92982 100644 --- a/fs/pstore/ftrace.c +++ b/fs/pstore/ftrace.c @@ -44,7 +44,7 @@ static void notrace pstore_ftrace_call(unsigned long ip, rec.parent_ip = parent_ip; pstore_ftrace_encode_cpu(&rec, raw_smp_processor_id()); psinfo->write_buf(PSTORE_TYPE_FTRACE, 0, NULL, 0, (void *)&rec, - sizeof(rec), psinfo); + 0, sizeof(rec), psinfo); local_irq_restore(flags); } diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c index e4bcb2cf055a..71bf5f4ae84c 100644 --- a/fs/pstore/inode.c +++ b/fs/pstore/inode.c @@ -178,6 +178,8 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry) if (p->psi->erase) p->psi->erase(p->type, p->id, p->count, dentry->d_inode->i_ctime, p->psi); + else + return -EPERM; return simple_unlink(dir, dentry); } @@ -324,6 +326,15 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count, case PSTORE_TYPE_MCE: sprintf(name, "mce-%s-%lld", psname, id); break; + case PSTORE_TYPE_PPC_RTAS: + sprintf(name, "rtas-%s-%lld", psname, id); + break; + case PSTORE_TYPE_PPC_OF: + sprintf(name, "powerpc-ofw-%s-%lld", psname, id); + break; + case PSTORE_TYPE_PPC_COMMON: + sprintf(name, "powerpc-common-%s-%lld", psname, id); + break; case PSTORE_TYPE_UNKNOWN: sprintf(name, "unknown-%s-%lld", psname, id); break; diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index 86d1038b5a12..422962ae9fc2 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -159,7 +159,7 @@ static void pstore_dump(struct kmsg_dumper *dumper, break; ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part, - oopscount, hsize + len, psinfo); + oopscount, hsize, hsize + len, psinfo); if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted()) pstore_new_entry = 1; @@ -196,7 +196,7 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c) spin_lock_irqsave(&psinfo->buf_lock, flags); } memcpy(psinfo->buf, s, c); - psinfo->write(PSTORE_TYPE_CONSOLE, 0, &id, 0, 0, c, psinfo); + psinfo->write(PSTORE_TYPE_CONSOLE, 0, &id, 0, 0, 0, c, psinfo); spin_unlock_irqrestore(&psinfo->buf_lock, flags); s += c; c = e - s; @@ -221,9 +221,11 @@ static void pstore_register_console(void) {} static int pstore_write_compat(enum pstore_type_id type, enum kmsg_dump_reason reason, u64 *id, unsigned int part, int count, - size_t size, struct pstore_info *psi) + size_t hsize, size_t size, + struct pstore_info *psi) { - return psi->write_buf(type, reason, id, part, psinfo->buf, size, psi); + return psi->write_buf(type, reason, id, part, psinfo->buf, hsize, + size, psi); } /* @@ -239,17 +241,15 @@ int pstore_register(struct pstore_info *psi) { struct module *owner = psi->owner; + if (backend && strcmp(backend, psi->name)) + return -EPERM; + spin_lock(&pstore_lock); if (psinfo) { spin_unlock(&pstore_lock); return -EBUSY; } - if (backend && strcmp(backend, psi->name)) { - spin_unlock(&pstore_lock); - return -EINVAL; - } - if (!psi->write) psi->write = pstore_write_compat; psinfo = psi; @@ -274,6 +274,9 @@ int pstore_register(struct pstore_info *psi) add_timer(&pstore_timer); } + pr_info("pstore: Registered %s as persistent store backend\n", + psi->name); + return 0; } EXPORT_SYMBOL_GPL(pstore_register); diff --git a/fs/pstore/ram.c 
b/fs/pstore/ram.c index 1376e5a8f0d6..a6119f9469e2 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -195,7 +195,8 @@ static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz) static int notrace ramoops_pstore_write_buf(enum pstore_type_id type, enum kmsg_dump_reason reason, u64 *id, unsigned int part, - const char *buf, size_t size, + const char *buf, + size_t hsize, size_t size, struct pstore_info *psi) { struct ramoops_context *cxt = psi->data; @@ -399,8 +400,6 @@ static int ramoops_probe(struct platform_device *pdev) goto fail_out; } - if (!is_power_of_2(pdata->mem_size)) - pdata->mem_size = rounddown_pow_of_two(pdata->mem_size); if (!is_power_of_2(pdata->record_size)) pdata->record_size = rounddown_pow_of_two(pdata->record_size); if (!is_power_of_2(pdata->console_size)) diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c index 59337326e288..de272d426763 100644 --- a/fs/pstore/ram_core.c +++ b/fs/pstore/ram_core.c @@ -46,7 +46,7 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz) } /* increase and wrap the start pointer, returning the old value */ -static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a) +static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a) { int old; int new; @@ -62,7 +62,7 @@ static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a) } /* increase the size counter until it hits the max size */ -static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a) +static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a) { size_t old; size_t new; @@ -78,6 +78,53 @@ static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a) } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old); } +static DEFINE_RAW_SPINLOCK(buffer_lock); + +/* increase and wrap the start pointer, returning the old value */ +static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a) +{ + int old; + int new; + unsigned long flags; + + raw_spin_lock_irqsave(&buffer_lock, flags); + + old = atomic_read(&prz->buffer->start); + new = old + a; + while (unlikely(new > prz->buffer_size)) + new -= prz->buffer_size; + atomic_set(&prz->buffer->start, new); + + raw_spin_unlock_irqrestore(&buffer_lock, flags); + + return old; +} + +/* increase the size counter until it hits the max size */ +static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a) +{ + size_t old; + size_t new; + unsigned long flags; + + raw_spin_lock_irqsave(&buffer_lock, flags); + + old = atomic_read(&prz->buffer->size); + if (old == prz->buffer_size) + goto exit; + + new = old + a; + if (new > prz->buffer_size) + new = prz->buffer_size; + atomic_set(&prz->buffer->size, new); + +exit: + raw_spin_unlock_irqrestore(&buffer_lock, flags); +} + +static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic; +static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic; + static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz, uint8_t *data, size_t len, uint8_t *ecc) { @@ -372,6 +419,9 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size) return NULL; } + buffer_start_add = buffer_start_add_locked; + buffer_size_add = buffer_size_add_locked; + return ioremap(start, size); } diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c index 28ce014b3cef..b218f965817b 100644 --- a/fs/qnx4/dir.c +++ b/fs/qnx4/dir.c @@ -14,9 +14,9 @@ #include <linux/buffer_head.h> 
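[Editor's sketch] The ram_core.c hunk above keeps two interchangeable implementations of the ring-buffer bookkeeping and flips the function pointers to the spinlocked pair once the zone is backed by ioremap()ed memory, where atomic operations may not be supported. A minimal sketch of the arithmetic both variants implement; the ring_* names are hypothetical and all concurrency protection is omitted:

struct ring {
	size_t start;	/* offset of the oldest byte */
	size_t size;	/* bytes in use; saturates at cap */
	size_t cap;	/* capacity; need not be a power of two */
};

/* advance and wrap the start offset, returning the old value */
static size_t ring_start_add(struct ring *r, size_t a)
{
	size_t old = r->start, new = old + a;

	while (new >= r->cap)	/* wrap by subtraction, not masking, */
		new -= r->cap;	/* so cap may be any size */
	r->start = new;
	return old;
}

/* grow the size counter until it saturates at the capacity */
static void ring_size_add(struct ring *r, size_t a)
{
	if (r->size < r->cap)
		r->size = (r->size + a > r->cap) ? r->cap : r->size + a;
}

Wrapping by subtraction rather than masking is consistent with the ramoops probe above no longer rounding mem_size down to a power of two.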
#include "qnx4.h" -static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int qnx4_readdir(struct file *file, struct dir_context *ctx) { - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); unsigned int offset; struct buffer_head *bh; struct qnx4_inode_entry *de; @@ -26,48 +26,44 @@ static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir) int size; QNX4DEBUG((KERN_INFO "qnx4_readdir:i_size = %ld\n", (long) inode->i_size)); - QNX4DEBUG((KERN_INFO "filp->f_pos = %ld\n", (long) filp->f_pos)); + QNX4DEBUG((KERN_INFO "pos = %ld\n", (long) ctx->pos)); - while (filp->f_pos < inode->i_size) { - blknum = qnx4_block_map( inode, filp->f_pos >> QNX4_BLOCK_SIZE_BITS ); + while (ctx->pos < inode->i_size) { + blknum = qnx4_block_map(inode, ctx->pos >> QNX4_BLOCK_SIZE_BITS); bh = sb_bread(inode->i_sb, blknum); - if(bh==NULL) { + if (bh == NULL) { printk(KERN_ERR "qnx4_readdir: bread failed (%ld)\n", blknum); - break; + return 0; } - ix = (int)(filp->f_pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK; - while (ix < QNX4_INODES_PER_BLOCK) { + ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK; + for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) { offset = ix * QNX4_DIR_ENTRY_SIZE; de = (struct qnx4_inode_entry *) (bh->b_data + offset); - size = strlen(de->di_fname); - if (size) { - if ( !( de->di_status & QNX4_FILE_LINK ) && size > QNX4_SHORT_NAME_MAX ) - size = QNX4_SHORT_NAME_MAX; - else if ( size > QNX4_NAME_MAX ) - size = QNX4_NAME_MAX; - - if ( ( de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK) ) != 0 ) { - QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname)); - if ( ( de->di_status & QNX4_FILE_LINK ) == 0 ) - ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1; - else { - le = (struct qnx4_link_info*)de; - ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) * - QNX4_INODES_PER_BLOCK + - le->dl_inode_ndx; - } - if (filldir(dirent, de->di_fname, size, filp->f_pos, ino, DT_UNKNOWN) < 0) { - brelse(bh); - goto out; - } - } + if (!de->di_fname[0]) + continue; + if (!(de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK))) + continue; + if (!(de->di_status & QNX4_FILE_LINK)) + size = QNX4_SHORT_NAME_MAX; + else + size = QNX4_NAME_MAX; + size = strnlen(de->di_fname, size); + QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname)); + if (!(de->di_status & QNX4_FILE_LINK)) + ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1; + else { + le = (struct qnx4_link_info*)de; + ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) * + QNX4_INODES_PER_BLOCK + + le->dl_inode_ndx; + } + if (!dir_emit(ctx, de->di_fname, size, ino, DT_UNKNOWN)) { + brelse(bh); + return 0; } - ix++; - filp->f_pos += QNX4_DIR_ENTRY_SIZE; } brelse(bh); } -out: return 0; } @@ -75,7 +71,7 @@ const struct file_operations qnx4_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = qnx4_readdir, + .iterate = qnx4_readdir, .fsync = generic_file_fsync, }; diff --git a/fs/qnx6/dir.c b/fs/qnx6/dir.c index afa6be6fc397..15b7d92ed60d 100644 --- a/fs/qnx6/dir.c +++ b/fs/qnx6/dir.c @@ -65,8 +65,8 @@ static struct qnx6_long_filename *qnx6_longname(struct super_block *sb, static int qnx6_dir_longfilename(struct inode *inode, struct qnx6_long_dir_entry *de, - void *dirent, loff_t pos, - unsigned de_inode, filldir_t filldir) + struct dir_context *ctx, + unsigned de_inode) { struct qnx6_long_filename *lf; struct super_block *s = inode->i_sb; @@ -104,8 +104,7 @@ static int qnx6_dir_longfilename(struct inode *inode, 
QNX6DEBUG((KERN_INFO "qnx6_readdir:%.*s inode:%u\n", lf_size, lf->lf_fname, de_inode)); - if (filldir(dirent, lf->lf_fname, lf_size, pos, de_inode, - DT_UNKNOWN) < 0) { + if (!dir_emit(ctx, lf->lf_fname, lf_size, de_inode, DT_UNKNOWN)) { qnx6_put_page(page); return 0; } @@ -115,18 +114,19 @@ static int qnx6_dir_longfilename(struct inode *inode, return 1; } -static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int qnx6_readdir(struct file *file, struct dir_context *ctx) { - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); struct super_block *s = inode->i_sb; struct qnx6_sb_info *sbi = QNX6_SB(s); - loff_t pos = filp->f_pos & ~(QNX6_DIR_ENTRY_SIZE - 1); + loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1); unsigned long npages = dir_pages(inode); unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE; bool done = false; - if (filp->f_pos >= inode->i_size) + ctx->pos = pos; + if (ctx->pos >= inode->i_size) return 0; for ( ; !done && n < npages; n++, start = 0) { @@ -137,11 +137,11 @@ static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir) if (IS_ERR(page)) { printk(KERN_ERR "qnx6_readdir: read failed\n"); - filp->f_pos = (n + 1) << PAGE_CACHE_SHIFT; + ctx->pos = (n + 1) << PAGE_CACHE_SHIFT; return PTR_ERR(page); } de = ((struct qnx6_dir_entry *)page_address(page)) + start; - for (; i < limit; i++, de++, pos += QNX6_DIR_ENTRY_SIZE) { + for (; i < limit; i++, de++, ctx->pos += QNX6_DIR_ENTRY_SIZE) { int size = de->de_size; u32 no_inode = fs32_to_cpu(sbi, de->de_inode); @@ -154,8 +154,7 @@ static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir) structure / block */ if (!qnx6_dir_longfilename(inode, (struct qnx6_long_dir_entry *)de, - dirent, pos, no_inode, - filldir)) { + ctx, no_inode)) { done = true; break; } @@ -163,9 +162,8 @@ static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir) QNX6DEBUG((KERN_INFO "qnx6_readdir:%.*s" " inode:%u\n", size, de->de_fname, no_inode)); - if (filldir(dirent, de->de_fname, size, - pos, no_inode, DT_UNKNOWN) - < 0) { + if (!dir_emit(ctx, de->de_fname, size, + no_inode, DT_UNKNOWN)) { done = true; break; } @@ -173,7 +171,6 @@ static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir) } qnx6_put_page(page); } - filp->f_pos = pos; return 0; } @@ -282,7 +279,7 @@ found: const struct file_operations qnx6_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = qnx6_readdir, + .iterate = qnx6_readdir, .fsync = generic_file_fsync, }; diff --git a/fs/read_write.c b/fs/read_write.c index 03430008704e..122a3846d9e1 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -41,8 +41,19 @@ static inline int unsigned_offsets(struct file *file) return file->f_mode & FMODE_UNSIGNED_OFFSET; } -static loff_t lseek_execute(struct file *file, struct inode *inode, - loff_t offset, loff_t maxsize) +/** + * vfs_setpos - update the file offset for lseek + * @file: file structure in question + * @offset: file offset to seek to + * @maxsize: maximum file size + * + * This is a low-level filesystem helper for updating the file offset to + * the value specified by @offset if the given offset is valid and it is + * not equal to the current file offset. + * + * Return the specified offset on success and -EINVAL on invalid offset. 
+ */ +loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize) { if (offset < 0 && !unsigned_offsets(file)) return -EINVAL; @@ -55,6 +66,7 @@ static loff_t lseek_execute(struct file *file, struct inode *inode, } return offset; } +EXPORT_SYMBOL(vfs_setpos); /** * generic_file_llseek_size - generic llseek implementation for regular files @@ -76,8 +88,6 @@ loff_t generic_file_llseek_size(struct file *file, loff_t offset, int whence, loff_t maxsize, loff_t eof) { - struct inode *inode = file->f_mapping->host; - switch (whence) { case SEEK_END: offset += eof; @@ -97,8 +107,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence, * like SEEK_SET. */ spin_lock(&file->f_lock); - offset = lseek_execute(file, inode, file->f_pos + offset, - maxsize); + offset = vfs_setpos(file, file->f_pos + offset, maxsize); spin_unlock(&file->f_lock); return offset; case SEEK_DATA: @@ -120,7 +129,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence, break; } - return lseek_execute(file, inode, offset, maxsize); + return vfs_setpos(file, offset, maxsize); } EXPORT_SYMBOL(generic_file_llseek_size); @@ -145,6 +154,26 @@ loff_t generic_file_llseek(struct file *file, loff_t offset, int whence) EXPORT_SYMBOL(generic_file_llseek); /** + * fixed_size_llseek - llseek implementation for fixed-sized devices + * @file: file structure to seek on + * @offset: file offset to seek to + * @whence: type of seek + * @size: size of the file + * + */ +loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size) +{ + switch (whence) { + case SEEK_SET: case SEEK_CUR: case SEEK_END: + return generic_file_llseek_size(file, offset, whence, + size, size); + default: + return -EINVAL; + } +} +EXPORT_SYMBOL(fixed_size_llseek); + +/** * noop_llseek - No Operation Performed llseek implementation * @file: file structure to seek on * @offset: file offset to seek to @@ -296,7 +325,7 @@ out_putf: * them to something that fits in "int" so that others * won't have to do range checks all the time. 
*/ -int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count) +int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count) { struct inode *inode; loff_t pos; @@ -477,7 +506,8 @@ SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count) if (f.file) { loff_t pos = file_pos_read(f.file); ret = vfs_read(f.file, buf, count, &pos); - file_pos_write(f.file, pos); + if (ret >= 0) + file_pos_write(f.file, pos); fdput(f); } return ret; @@ -492,7 +522,8 @@ SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf, if (f.file) { loff_t pos = file_pos_read(f.file); ret = vfs_write(f.file, buf, count, &pos); - file_pos_write(f.file, pos); + if (ret >= 0) + file_pos_write(f.file, pos); fdput(f); } @@ -780,7 +811,8 @@ SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec, if (f.file) { loff_t pos = file_pos_read(f.file); ret = vfs_readv(f.file, vec, vlen, &pos); - file_pos_write(f.file, pos); + if (ret >= 0) + file_pos_write(f.file, pos); fdput(f); } @@ -799,7 +831,8 @@ SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec, if (f.file) { loff_t pos = file_pos_read(f.file); ret = vfs_writev(f.file, vec, vlen, &pos); - file_pos_write(f.file, pos); + if (ret >= 0) + file_pos_write(f.file, pos); fdput(f); } @@ -959,7 +992,8 @@ COMPAT_SYSCALL_DEFINE3(readv, unsigned long, fd, return -EBADF; pos = f.file->f_pos; ret = compat_readv(f.file, vec, vlen, &pos); - f.file->f_pos = pos; + if (ret >= 0) + f.file->f_pos = pos; fdput(f); return ret; } @@ -1025,7 +1059,8 @@ COMPAT_SYSCALL_DEFINE3(writev, unsigned long, fd, return -EBADF; pos = f.file->f_pos; ret = compat_writev(f.file, vec, vlen, &pos); - f.file->f_pos = pos; + if (ret >= 0) + f.file->f_pos = pos; fdput(f); return ret; } @@ -1064,6 +1099,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, struct fd in, out; struct inode *in_inode, *out_inode; loff_t pos; + loff_t out_pos; ssize_t retval; int fl; @@ -1077,12 +1113,14 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, if (!(in.file->f_mode & FMODE_READ)) goto fput_in; retval = -ESPIPE; - if (!ppos) - ppos = &in.file->f_pos; - else + if (!ppos) { + pos = in.file->f_pos; + } else { + pos = *ppos; if (!(in.file->f_mode & FMODE_PREAD)) goto fput_in; - retval = rw_verify_area(READ, in.file, ppos, count); + } + retval = rw_verify_area(READ, in.file, &pos, count); if (retval < 0) goto fput_in; count = retval; @@ -1099,7 +1137,8 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, retval = -EINVAL; in_inode = file_inode(in.file); out_inode = file_inode(out.file); - retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count); + out_pos = out.file->f_pos; + retval = rw_verify_area(WRITE, out.file, &out_pos, count); if (retval < 0) goto fput_out; count = retval; @@ -1107,7 +1146,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, if (!max) max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes); - pos = *ppos; if (unlikely(pos + count > max)) { retval = -EOVERFLOW; if (pos >= max) @@ -1126,18 +1164,25 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, if (in.file->f_flags & O_NONBLOCK) fl = SPLICE_F_NONBLOCK; #endif - retval = do_splice_direct(in.file, ppos, out.file, count, fl); + file_start_write(out.file); + retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl); + file_end_write(out.file); if (retval > 0) { add_rchar(current, retval); add_wchar(current, retval); 
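[Editor's sketch] With fixed_size_llseek() exported above, a driver exposing a fixed-size region no longer needs a hand-rolled llseek method. A one-line sketch, where exdev_llseek and EXDEV_SIZE are hypothetical:

static loff_t exdev_llseek(struct file *file, loff_t offset, int whence)
{
	/* validate SEEK_SET/SEEK_CUR/SEEK_END against the fixed size */
	return fixed_size_llseek(file, offset, whence, EXDEV_SIZE);
}

The helper routes the three supported whence values through generic_file_llseek_size(), which ends in vfs_setpos(), and rejects anything else with -EINVAL.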
fsnotify_access(in.file); fsnotify_modify(out.file); + out.file->f_pos = out_pos; + if (ppos) + *ppos = pos; + else + in.file->f_pos = pos; } inc_syscr(current); inc_syscw(current); - if (*ppos > max) + if (pos > max) retval = -EOVERFLOW; fput_out: diff --git a/fs/readdir.c b/fs/readdir.c index fee38e04fae4..93d71e574310 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -20,11 +20,11 @@ #include <asm/uaccess.h> -int vfs_readdir(struct file *file, filldir_t filler, void *buf) +int iterate_dir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); int res = -ENOTDIR; - if (!file->f_op || !file->f_op->readdir) + if (!file->f_op || !file->f_op->iterate) goto out; res = security_file_permission(file, MAY_READ); @@ -37,15 +37,16 @@ int vfs_readdir(struct file *file, filldir_t filler, void *buf) res = -ENOENT; if (!IS_DEADDIR(inode)) { - res = file->f_op->readdir(file, buf, filler); + ctx->pos = file->f_pos; + res = file->f_op->iterate(file, ctx); + file->f_pos = ctx->pos; file_accessed(file); } mutex_unlock(&inode->i_mutex); out: return res; } - -EXPORT_SYMBOL(vfs_readdir); +EXPORT_SYMBOL(iterate_dir); /* * Traditional linux readdir() handling.. @@ -66,6 +67,7 @@ struct old_linux_dirent { }; struct readdir_callback { + struct dir_context ctx; struct old_linux_dirent __user * dirent; int result; }; @@ -73,7 +75,7 @@ struct readdir_callback { static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { - struct readdir_callback * buf = (struct readdir_callback *) __buf; + struct readdir_callback *buf = (struct readdir_callback *) __buf; struct old_linux_dirent __user * dirent; unsigned long d_ino; @@ -107,15 +109,15 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd, { int error; struct fd f = fdget(fd); - struct readdir_callback buf; + struct readdir_callback buf = { + .ctx.actor = fillonedir, + .dirent = dirent + }; if (!f.file) return -EBADF; - buf.result = 0; - buf.dirent = dirent; - - error = vfs_readdir(f.file, fillonedir, &buf); + error = iterate_dir(f.file, &buf.ctx); if (buf.result) error = buf.result; @@ -137,6 +139,7 @@ struct linux_dirent { }; struct getdents_callback { + struct dir_context ctx; struct linux_dirent __user * current_dir; struct linux_dirent __user * previous; int count; @@ -191,7 +194,11 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, { struct fd f; struct linux_dirent __user * lastdirent; - struct getdents_callback buf; + struct getdents_callback buf = { + .ctx.actor = filldir, + .count = count, + .current_dir = dirent + }; int error; if (!access_ok(VERIFY_WRITE, dirent, count)) @@ -201,17 +208,12 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, if (!f.file) return -EBADF; - buf.current_dir = dirent; - buf.previous = NULL; - buf.count = count; - buf.error = 0; - - error = vfs_readdir(f.file, filldir, &buf); + error = iterate_dir(f.file, &buf.ctx); if (error >= 0) error = buf.error; lastdirent = buf.previous; if (lastdirent) { - if (put_user(f.file->f_pos, &lastdirent->d_off)) + if (put_user(buf.ctx.pos, &lastdirent->d_off)) error = -EFAULT; else error = count - buf.count; @@ -221,6 +223,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, } struct getdents_callback64 { + struct dir_context ctx; struct linux_dirent64 __user * current_dir; struct linux_dirent64 __user * previous; int count; @@ -271,7 +274,11 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, { struct fd f; struct linux_dirent64 __user * lastdirent; - struct getdents_callback64 buf; + struct getdents_callback64 buf = { + .ctx.actor = 
filldir64, + .count = count, + .current_dir = dirent + }; int error; if (!access_ok(VERIFY_WRITE, dirent, count)) @@ -281,17 +288,12 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, if (!f.file) return -EBADF; - buf.current_dir = dirent; - buf.previous = NULL; - buf.count = count; - buf.error = 0; - - error = vfs_readdir(f.file, filldir64, &buf); + error = iterate_dir(f.file, &buf.ctx); if (error >= 0) error = buf.error; lastdirent = buf.previous; if (lastdirent) { - typeof(lastdirent->d_off) d_off = f.file->f_pos; + typeof(lastdirent->d_off) d_off = buf.ctx.pos; if (__put_user(d_off, &lastdirent->d_off)) error = -EFAULT; else diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c index 6c2d136561cb..03e4ca5624d6 100644 --- a/fs/reiserfs/dir.c +++ b/fs/reiserfs/dir.c @@ -13,14 +13,14 @@ extern const struct reiserfs_key MIN_KEY; -static int reiserfs_readdir(struct file *, void *, filldir_t); +static int reiserfs_readdir(struct file *, struct dir_context *); static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end, int datasync); const struct file_operations reiserfs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = reiserfs_readdir, + .iterate = reiserfs_readdir, .fsync = reiserfs_dir_fsync, .unlocked_ioctl = reiserfs_ioctl, #ifdef CONFIG_COMPAT @@ -50,18 +50,15 @@ static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end, #define store_ih(where,what) copy_item_head (where, what) -static inline bool is_privroot_deh(struct dentry *dir, - struct reiserfs_de_head *deh) +static inline bool is_privroot_deh(struct inode *dir, struct reiserfs_de_head *deh) { - struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root; - return (dir == dir->d_parent && privroot->d_inode && + struct dentry *privroot = REISERFS_SB(dir->i_sb)->priv_root; + return (privroot->d_inode && deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid); } -int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent, - filldir_t filldir, loff_t *pos) +int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx) { - struct inode *inode = dentry->d_inode; struct cpu_key pos_key; /* key of current position in the directory (key of directory entry) */ INITIALIZE_PATH(path_to_entry); struct buffer_head *bh; @@ -81,7 +78,7 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent, /* form key for search the next directory entry using f_pos field of file structure */ - make_cpu_key(&pos_key, inode, *pos ?: DOT_OFFSET, TYPE_DIRENTRY, 3); + make_cpu_key(&pos_key, inode, ctx->pos ?: DOT_OFFSET, TYPE_DIRENTRY, 3); next_pos = cpu_key_k_offset(&pos_key); path_to_entry.reada = PATH_READA; @@ -126,7 +123,6 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent, entry_num++, deh++) { int d_reclen; char *d_name; - off_t d_off; ino_t d_ino; if (!de_visible(deh)) @@ -155,11 +151,10 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent, } /* Ignore the .reiserfs_priv entry */ - if (is_privroot_deh(dentry, deh)) + if (is_privroot_deh(inode, deh)) continue; - d_off = deh_offset(deh); - *pos = d_off; + ctx->pos = deh_offset(deh); d_ino = deh_objectid(deh); if (d_reclen <= 32) { local_buf = small_buf; @@ -187,9 +182,9 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent, * the write lock here for other waiters */ reiserfs_write_unlock(inode->i_sb); - if (filldir - (dirent, local_buf, d_reclen, d_off, d_ino, - DT_UNKNOWN) < 0) { + if (!dir_emit + (ctx, local_buf, d_reclen, d_ino, + DT_UNKNOWN)) { 
reiserfs_write_lock(inode->i_sb); if (local_buf != small_buf) { kfree(local_buf); @@ -237,7 +232,7 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent, } /* while */ end: - *pos = next_pos; + ctx->pos = next_pos; pathrelse(&path_to_entry); reiserfs_check_path(&path_to_entry); out: @@ -245,10 +240,9 @@ out: return ret; } -static int reiserfs_readdir(struct file *file, void *dirent, filldir_t filldir) +static int reiserfs_readdir(struct file *file, struct dir_context *ctx) { - struct dentry *dentry = file->f_path.dentry; - return reiserfs_readdir_dentry(dentry, dirent, filldir, &file->f_pos); + return reiserfs_readdir_inode(file_inode(file), ctx); } /* compose directory item containing "." and ".." entries (entries are diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index f844533792ee..0048cc16a6a8 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -2975,16 +2975,19 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh) } /* clm -- taken from fs/buffer.c:block_invalidate_page */ -static void reiserfs_invalidatepage(struct page *page, unsigned long offset) +static void reiserfs_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { struct buffer_head *head, *bh, *next; struct inode *inode = page->mapping->host; unsigned int curr_off = 0; + unsigned int stop = offset + length; + int partial_page = (offset || length < PAGE_CACHE_SIZE); int ret = 1; BUG_ON(!PageLocked(page)); - if (offset == 0) + if (!partial_page) ClearPageChecked(page); if (!page_has_buffers(page)) @@ -2996,6 +2999,9 @@ static void reiserfs_invalidatepage(struct page *page, unsigned long offset) unsigned int next_off = curr_off + bh->b_size; next = bh->b_this_page; + if (next_off > stop) + goto out; + /* * is this block fully invalidated? */ @@ -3014,7 +3020,7 @@ static void reiserfs_invalidatepage(struct page *page, unsigned long offset) * The get_block cached value has been unconditionally invalidated, * so real IO is not possible anymore. */ - if (!offset && ret) { + if (!partial_page && ret) { ret = try_to_release_page(page, 0); /* maybe should BUG_ON(!ret); - neilb */ } diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h index 157e474ab303..3df5ce6c724d 100644 --- a/fs/reiserfs/reiserfs.h +++ b/fs/reiserfs/reiserfs.h @@ -2709,7 +2709,7 @@ extern const struct inode_operations reiserfs_dir_inode_operations; extern const struct inode_operations reiserfs_symlink_inode_operations; extern const struct inode_operations reiserfs_special_inode_operations; extern const struct file_operations reiserfs_dir_operations; -int reiserfs_readdir_dentry(struct dentry *, void *, filldir_t, loff_t *); +int reiserfs_readdir_inode(struct inode *, struct dir_context *); /* tail_conversion.c */ int direct2indirect(struct reiserfs_transaction_handle *, struct inode *, diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 821bcf70e467..c69cdd749f09 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -171,6 +171,7 @@ static struct dentry *open_xa_dir(const struct inode *inode, int flags) * modifying extended attributes. This includes operations such as permissions * or ownership changes, object deletions, etc. 
*/ struct reiserfs_dentry_buf { + struct dir_context ctx; struct dentry *xadir; int count; struct dentry *dentries[8]; @@ -223,9 +224,8 @@ static int reiserfs_for_each_xattr(struct inode *inode, { struct dentry *dir; int i, err = 0; - loff_t pos = 0; struct reiserfs_dentry_buf buf = { - .count = 0, + .ctx.actor = fill_with_dentries, }; /* Skip out, an xattr has no xattrs associated with it */ @@ -249,29 +249,27 @@ static int reiserfs_for_each_xattr(struct inode *inode, reiserfs_write_lock(inode->i_sb); buf.xadir = dir; - err = reiserfs_readdir_dentry(dir, &buf, fill_with_dentries, &pos); - while ((err == 0 || err == -ENOSPC) && buf.count) { - err = 0; - - for (i = 0; i < buf.count && buf.dentries[i]; i++) { - int lerr = 0; + while (1) { + err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx); + if (err) + break; + if (!buf.count) + break; + for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) { struct dentry *dentry = buf.dentries[i]; - if (err == 0 && !S_ISDIR(dentry->d_inode->i_mode)) - lerr = action(dentry, data); + if (!S_ISDIR(dentry->d_inode->i_mode)) + err = action(dentry, data); dput(dentry); buf.dentries[i] = NULL; - err = lerr ?: err; } + if (err) + break; buf.count = 0; - if (!err) - err = reiserfs_readdir_dentry(dir, &buf, - fill_with_dentries, &pos); } mutex_unlock(&dir->d_inode->i_mutex); - /* Clean up after a failed readdir */ cleanup_dentry_buf(&buf); if (!err) { @@ -800,6 +798,7 @@ int reiserfs_removexattr(struct dentry *dentry, const char *name) } struct listxattr_buf { + struct dir_context ctx; size_t size; size_t pos; char *buf; @@ -845,8 +844,8 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size) { struct dentry *dir; int err = 0; - loff_t pos = 0; struct listxattr_buf buf = { + .ctx.actor = listxattr_filler, .dentry = dentry, .buf = buffer, .size = buffer ? size : 0, @@ -868,7 +867,7 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size) } mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR); - err = reiserfs_readdir_dentry(dir, &buf, listxattr_filler, &pos); + err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx); mutex_unlock(&dir->d_inode->i_mutex); if (!err) diff --git a/fs/romfs/super.c b/fs/romfs/super.c index 15cbc41ee365..ff1d3d42e72a 100644 --- a/fs/romfs/super.c +++ b/fs/romfs/super.c @@ -145,19 +145,18 @@ static const struct address_space_operations romfs_aops = { /* * read the entries from a directory */ -static int romfs_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int romfs_readdir(struct file *file, struct dir_context *ctx) { - struct inode *i = file_inode(filp); + struct inode *i = file_inode(file); struct romfs_inode ri; unsigned long offset, maxoff; int j, ino, nextfh; - int stored = 0; char fsname[ROMFS_MAXFN]; /* XXX dynamic? 
*/ int ret; maxoff = romfs_maxsize(i->i_sb); - offset = filp->f_pos; + offset = ctx->pos; if (!offset) { offset = i->i_ino & ROMFH_MASK; ret = romfs_dev_read(i->i_sb, offset, &ri, ROMFH_SIZE); @@ -170,10 +169,10 @@ static int romfs_readdir(struct file *filp, void *dirent, filldir_t filldir) for (;;) { if (!offset || offset >= maxoff) { offset = maxoff; - filp->f_pos = offset; + ctx->pos = offset; goto out; } - filp->f_pos = offset; + ctx->pos = offset; /* Fetch inode info */ ret = romfs_dev_read(i->i_sb, offset, &ri, ROMFH_SIZE); @@ -194,16 +193,14 @@ static int romfs_readdir(struct file *filp, void *dirent, filldir_t filldir) nextfh = be32_to_cpu(ri.next); if ((nextfh & ROMFH_TYPE) == ROMFH_HRD) ino = be32_to_cpu(ri.spec); - if (filldir(dirent, fsname, j, offset, ino, - romfs_dtype_table[nextfh & ROMFH_TYPE]) < 0) + if (!dir_emit(ctx, fsname, j, ino, + romfs_dtype_table[nextfh & ROMFH_TYPE])) goto out; - stored++; offset = nextfh & ROMFH_MASK; } - out: - return stored; + return 0; } /* @@ -281,7 +278,7 @@ error: static const struct file_operations romfs_dir_operations = { .read = generic_read_dir, - .readdir = romfs_readdir, + .iterate = romfs_readdir, .llseek = default_llseek, }; diff --git a/fs/select.c b/fs/select.c index 8c1c96c27062..6b14dc7df3a4 100644 --- a/fs/select.c +++ b/fs/select.c @@ -27,6 +27,7 @@ #include <linux/rcupdate.h> #include <linux/hrtimer.h> #include <linux/sched/rt.h> +#include <linux/freezer.h> #include <asm/uaccess.h> @@ -236,7 +237,8 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state, set_current_state(state); if (!pwq->triggered) - rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS); + rc = freezable_schedule_hrtimeout_range(expires, slack, + HRTIMER_MODE_ABS); __set_current_state(TASK_RUNNING); /* diff --git a/fs/splice.c b/fs/splice.c index e6b25598c8c4..3b7ee656f3aa 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -1098,27 +1098,13 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, { ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); - int ret; - - if (unlikely(!(out->f_mode & FMODE_WRITE))) - return -EBADF; - - if (unlikely(out->f_flags & O_APPEND)) - return -EINVAL; - - ret = rw_verify_area(WRITE, out, ppos, len); - if (unlikely(ret < 0)) - return ret; if (out->f_op && out->f_op->splice_write) splice_write = out->f_op->splice_write; else splice_write = default_file_splice_write; - file_start_write(out); - ret = splice_write(pipe, out, ppos, len, flags); - file_end_write(out); - return ret; + return splice_write(pipe, out, ppos, len, flags); } /* @@ -1274,7 +1260,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe, { struct file *file = sd->u.file; - return do_splice_from(pipe, file, &file->f_pos, sd->total_len, + return do_splice_from(pipe, file, sd->opos, sd->total_len, sd->flags); } @@ -1283,6 +1269,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe, * @in: file to splice from * @ppos: input file offset * @out: file to splice to + * @opos: output file offset * @len: number of bytes to splice * @flags: splice modifier flags * @@ -1294,7 +1281,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe, * */ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, - size_t len, unsigned int flags) + loff_t *opos, size_t len, unsigned int flags) { struct splice_desc sd = { .len = len, @@ -1302,9 +1289,20 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, .flags = flags, .pos = *ppos, .u.file = 
out, + .opos = opos, }; long ret; + if (unlikely(!(out->f_mode & FMODE_WRITE))) + return -EBADF; + + if (unlikely(out->f_flags & O_APPEND)) + return -EINVAL; + + ret = rw_verify_area(WRITE, out, opos, len); + if (unlikely(ret < 0)) + return ret; + ret = splice_direct_to_actor(in, &sd, direct_splice_actor); if (ret > 0) *ppos = sd.pos; @@ -1325,7 +1323,7 @@ static long do_splice(struct file *in, loff_t __user *off_in, { struct pipe_inode_info *ipipe; struct pipe_inode_info *opipe; - loff_t offset, *off; + loff_t offset; long ret; ipipe = get_pipe_info(in); @@ -1356,13 +1354,27 @@ static long do_splice(struct file *in, loff_t __user *off_in, return -EINVAL; if (copy_from_user(&offset, off_out, sizeof(loff_t))) return -EFAULT; - off = &offset; - } else - off = &out->f_pos; + } else { + offset = out->f_pos; + } - ret = do_splice_from(ipipe, out, off, len, flags); + if (unlikely(!(out->f_mode & FMODE_WRITE))) + return -EBADF; - if (off_out && copy_to_user(off_out, off, sizeof(loff_t))) + if (unlikely(out->f_flags & O_APPEND)) + return -EINVAL; + + ret = rw_verify_area(WRITE, out, &offset, len); + if (unlikely(ret < 0)) + return ret; + + file_start_write(out); + ret = do_splice_from(ipipe, out, &offset, len, flags); + file_end_write(out); + + if (!off_out) + out->f_pos = offset; + else if (copy_to_user(off_out, &offset, sizeof(loff_t))) ret = -EFAULT; return ret; @@ -1376,13 +1388,15 @@ static long do_splice(struct file *in, loff_t __user *off_in, return -EINVAL; if (copy_from_user(&offset, off_in, sizeof(loff_t))) return -EFAULT; - off = &offset; - } else - off = &in->f_pos; + } else { + offset = in->f_pos; + } - ret = do_splice_to(in, off, opipe, len, flags); + ret = do_splice_to(in, &offset, opipe, len, flags); - if (off_in && copy_to_user(off_in, off, sizeof(loff_t))) + if (!off_in) + in->f_pos = offset; + else if (copy_to_user(off_in, &offset, sizeof(loff_t))) ret = -EFAULT; return ret; diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c index 57dc70ebbb19..f7f527bf8c10 100644 --- a/fs/squashfs/dir.c +++ b/fs/squashfs/dir.c @@ -100,7 +100,7 @@ static int get_dir_index_using_offset(struct super_block *sb, } -static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir) +static int squashfs_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; @@ -127,11 +127,11 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir) * It also means that the external f_pos is offset by 3 from the * on-disk directory f_pos. 
*/ - while (file->f_pos < 3) { + while (ctx->pos < 3) { char *name; int i_ino; - if (file->f_pos == 0) { + if (ctx->pos == 0) { name = "."; size = 1; i_ino = inode->i_ino; @@ -141,24 +141,18 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir) i_ino = squashfs_i(inode)->parent; } - TRACE("Calling filldir(%p, %s, %d, %lld, %d, %d)\n", - dirent, name, size, file->f_pos, i_ino, - squashfs_filetype_table[1]); - - if (filldir(dirent, name, size, file->f_pos, i_ino, - squashfs_filetype_table[1]) < 0) { - TRACE("Filldir returned less than 0\n"); + if (!dir_emit(ctx, name, size, i_ino, + squashfs_filetype_table[1])) goto finish; - } - file->f_pos += size; + ctx->pos += size; } length = get_dir_index_using_offset(inode->i_sb, &block, &offset, squashfs_i(inode)->dir_idx_start, squashfs_i(inode)->dir_idx_offset, squashfs_i(inode)->dir_idx_cnt, - file->f_pos); + ctx->pos); while (length < i_size_read(inode)) { /* @@ -198,7 +192,7 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir) length += sizeof(*dire) + size; - if (file->f_pos >= length) + if (ctx->pos >= length) continue; dire->name[size] = '\0'; @@ -206,22 +200,12 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir) ((short) le16_to_cpu(dire->inode_number)); type = le16_to_cpu(dire->type); - TRACE("Calling filldir(%p, %s, %d, %lld, %x:%x, %d, %d)" - "\n", dirent, dire->name, size, - file->f_pos, - le32_to_cpu(dirh.start_block), - le16_to_cpu(dire->offset), - inode_number, - squashfs_filetype_table[type]); - - if (filldir(dirent, dire->name, size, file->f_pos, + if (!dir_emit(ctx, dire->name, size, inode_number, - squashfs_filetype_table[type]) < 0) { - TRACE("Filldir returned less than 0\n"); + squashfs_filetype_table[type])) goto finish; - } - file->f_pos = length; + ctx->pos = length; } } @@ -238,6 +222,6 @@ failed_read: const struct file_operations squashfs_dir_ops = { .read = generic_read_dir, - .readdir = squashfs_readdir, + .iterate = squashfs_readdir, .llseek = default_llseek, }; diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index e8e0e71b29d5..e068e744dbdd 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -74,7 +74,7 @@ static int sysfs_sd_compare(const struct sysfs_dirent *left, } /** - * sysfs_link_subling - link sysfs_dirent into sibling rbtree + * sysfs_link_sibling - link sysfs_dirent into sibling rbtree * @sd: sysfs_dirent of interest * * Link @sd into its sibling rbtree which starts from @@ -998,68 +998,38 @@ static struct sysfs_dirent *sysfs_dir_next_pos(const void *ns, return pos; } -static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir) +static int sysfs_readdir(struct file *file, struct dir_context *ctx) { - struct dentry *dentry = filp->f_path.dentry; + struct dentry *dentry = file->f_path.dentry; struct sysfs_dirent * parent_sd = dentry->d_fsdata; - struct sysfs_dirent *pos = filp->private_data; + struct sysfs_dirent *pos = file->private_data; enum kobj_ns_type type; const void *ns; - ino_t ino; - loff_t off; type = sysfs_ns_type(parent_sd); ns = sysfs_info(dentry->d_sb)->ns[type]; - if (filp->f_pos == 0) { - ino = parent_sd->s_ino; - if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0) - filp->f_pos++; - else - return 0; - } - if (filp->f_pos == 1) { - if (parent_sd->s_parent) - ino = parent_sd->s_parent->s_ino; - else - ino = parent_sd->s_ino; - if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0) - filp->f_pos++; - else - return 0; - } + if (!dir_emit_dots(file, ctx)) + return 0; 
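[Editor's sketch] The sysfs conversion above shows the shape every ->iterate() instance in this series takes: emit the dot entries, feed real entries through dir_emit() until the caller's buffer fills, and keep ctx->pos pointing at the next entry so a later call resumes correctly. The same shape for a minimal hypothetical filesystem; the examplefs_* helpers are assumptions, not real kernel API:

static int examplefs_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);

	if (!dir_emit_dots(file, ctx))	/* emits "." and ".." as needed */
		return 0;
	while (ctx->pos - 2 < examplefs_nr_entries(inode)) {
		const struct examplefs_entry *e =
			examplefs_entry(inode, ctx->pos - 2);

		if (!dir_emit(ctx, e->name, strlen(e->name),
			      e->ino, DT_UNKNOWN))
			return 0;	/* buffer full; pos resumes here */
		ctx->pos++;
	}
	return 0;
}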
mutex_lock(&sysfs_mutex); - off = filp->f_pos; - for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos); + for (pos = sysfs_dir_pos(ns, parent_sd, ctx->pos, pos); pos; - pos = sysfs_dir_next_pos(ns, parent_sd, filp->f_pos, pos)) { - const char * name; - unsigned int type; - int len, ret; - - name = pos->s_name; - len = strlen(name); - ino = pos->s_ino; - type = dt_type(pos); - off = filp->f_pos = pos->s_hash; - filp->private_data = sysfs_get(pos); + pos = sysfs_dir_next_pos(ns, parent_sd, ctx->pos, pos)) { + const char *name = pos->s_name; + unsigned int type = dt_type(pos); + int len = strlen(name); + ino_t ino = pos->s_ino; + ctx->pos = pos->s_hash; + file->private_data = sysfs_get(pos); mutex_unlock(&sysfs_mutex); - ret = filldir(dirent, name, len, off, ino, type); + if (!dir_emit(ctx, name, len, ino, type)) + return 0; mutex_lock(&sysfs_mutex); - if (ret < 0) - break; } mutex_unlock(&sysfs_mutex); - - /* don't reference last entry if its refcount is dropped */ - if (!pos) { - filp->private_data = NULL; - - /* EOF and not changed as 0 or 1 in read/write path */ - if (off == filp->f_pos && off > 1) - filp->f_pos = INT_MAX; - } + file->private_data = NULL; + ctx->pos = INT_MAX; return 0; } @@ -1077,7 +1047,7 @@ static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence) const struct file_operations sysfs_dir_operations = { .read = generic_read_dir, - .readdir = sysfs_readdir, + .iterate = sysfs_readdir, .release = sysfs_dir_release, .llseek = sysfs_dir_llseek, }; diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 602f56db0442..d2bb7ed8fa74 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -449,10 +449,12 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd) spin_lock_irqsave(&sysfs_open_dirent_lock, flags); - od = sd->s_attr.open; - if (od) { - atomic_inc(&od->event); - wake_up_interruptible(&od->poll); + if (!WARN_ON(sysfs_type(sd) != SYSFS_KOBJ_ATTR)) { + od = sd->s_attr.open; + if (od) { + atomic_inc(&od->event); + wake_up_interruptible(&od->poll); + } } spin_unlock_irqrestore(&sysfs_open_dirent_lock, flags); diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index 0ce3ccf7f401..3e2837a633ed 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c @@ -24,8 +24,6 @@ #include <linux/security.h> #include "sysfs.h" -extern struct super_block * sysfs_sb; - static const struct address_space_operations sysfs_aops = { .readpage = simple_readpage, .write_begin = simple_write_begin, diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c index 3799e8dac3eb..d42291d08215 100644 --- a/fs/sysv/dir.c +++ b/fs/sysv/dir.c @@ -18,12 +18,12 @@ #include <linux/swap.h> #include "sysv.h" -static int sysv_readdir(struct file *, void *, filldir_t); +static int sysv_readdir(struct file *, struct dir_context *); const struct file_operations sysv_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = sysv_readdir, + .iterate = sysv_readdir, .fsync = generic_file_fsync, }; @@ -65,18 +65,21 @@ static struct page * dir_get_page(struct inode *dir, unsigned long n) return page; } -static int sysv_readdir(struct file * filp, void * dirent, filldir_t filldir) +static int sysv_readdir(struct file *file, struct dir_context *ctx) { - unsigned long pos = filp->f_pos; - struct inode *inode = file_inode(filp); + unsigned long pos = ctx->pos; + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; - unsigned offset = pos & ~PAGE_CACHE_MASK; - unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned long npages = dir_pages(inode); + unsigned offset; + unsigned long n; 
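[Editor's sketch] On the calling side, the readdir.c rework above depends on struct dir_context being the first member of each callback buffer: the core hands the actor a pointer to the embedded context, and the actor casts it back to the enclosing structure, exactly as fillonedir and the getdents callbacks do. A stripped-down sketch of the pattern, with hypothetical my_* names:

struct my_callback {
	struct dir_context ctx;	/* must stay first: callers pass &buf.ctx */
	int count;
};

static int my_actor(void *__buf, const char *name, int namlen,
		    loff_t offset, u64 ino, unsigned int d_type)
{
	struct my_callback *buf = __buf;	/* valid because ctx is first */

	buf->count++;
	return 0;	/* non-zero would stop the iteration */
}

Usage then mirrors the getdents paths above:

	struct my_callback buf = { .ctx.actor = my_actor };
	int error = iterate_dir(file, &buf.ctx);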
- pos = (pos + SYSV_DIRSIZE-1) & ~(SYSV_DIRSIZE-1); + ctx->pos = pos = (pos + SYSV_DIRSIZE-1) & ~(SYSV_DIRSIZE-1); if (pos >= inode->i_size) - goto done; + return 0; + + offset = pos & ~PAGE_CACHE_MASK; + n = pos >> PAGE_CACHE_SHIFT; for ( ; n < npages; n++, offset = 0) { char *kaddr, *limit; @@ -88,29 +91,21 @@ static int sysv_readdir(struct file * filp, void * dirent, filldir_t filldir) kaddr = (char *)page_address(page); de = (struct sysv_dir_entry *)(kaddr+offset); limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE; - for ( ;(char*)de <= limit; de++) { + for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) { char *name = de->name; - int over; if (!de->inode) continue; - offset = (char *)de - kaddr; - - over = filldir(dirent, name, strnlen(name,SYSV_NAMELEN), - ((loff_t)n<<PAGE_CACHE_SHIFT) | offset, + if (!dir_emit(ctx, name, strnlen(name,SYSV_NAMELEN), fs16_to_cpu(SYSV_SB(sb), de->inode), - DT_UNKNOWN); - if (over) { + DT_UNKNOWN)) { dir_put_page(page); - goto done; + return 0; } } dir_put_page(page); } - -done: - filp->f_pos = ((loff_t)n << PAGE_CACHE_SHIFT) | offset; return 0; } diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index 1c0d5f264767..731b2bbcaab3 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c @@ -27,8 +27,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode) return err; } -static int sysv_hash(const struct dentry *dentry, const struct inode *inode, - struct qstr *qstr) +static int sysv_hash(const struct dentry *dentry, struct qstr *qstr) { /* Truncate the name in place, avoids having to define a compare function. */ diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index de08c92f2e23..6b4947f75af7 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -346,38 +346,46 @@ static unsigned int vfs_dent_type(uint8_t type) * This means that UBIFS cannot support NFS which requires full * 'seekdir()'/'telldir()' support. */ -static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) +static int ubifs_readdir(struct file *file, struct dir_context *ctx) { - int err, over = 0; + int err; struct qstr nm; union ubifs_key key; struct ubifs_dent_node *dent; struct inode *dir = file_inode(file); struct ubifs_info *c = dir->i_sb->s_fs_info; - dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos); + dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, ctx->pos); - if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2) + if (ctx->pos > UBIFS_S_KEY_HASH_MASK || ctx->pos == 2) /* * The directory was seek'ed to a senseless position or there * are no more entries. */ return 0; - /* File positions 0 and 1 correspond to "." and ".." */ - if (file->f_pos == 0) { - ubifs_assert(!file->private_data); - over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR); - if (over) - return 0; - file->f_pos = 1; + if (file->f_version == 0) { + /* + * The file was seek'ed, which means that @file->private_data + * is now invalid. This may also be just the first + * 'ubifs_readdir()' invocation, in which case + * @file->private_data is NULL, and the below code is + * basically a no-op. + */ + kfree(file->private_data); + file->private_data = NULL; } - if (file->f_pos == 1) { + /* + * 'generic_file_llseek()' unconditionally sets @file->f_version to + * zero, and we use this for detecting whether the file was seek'ed. + */ + file->f_version = 1; + + /* File positions 0 and 1 correspond to "." and ".." 
*/ + if (ctx->pos < 2) { ubifs_assert(!file->private_data); - over = filldir(dirent, "..", 2, 1, - parent_ino(file->f_path.dentry), DT_DIR); - if (over) + if (!dir_emit_dots(file, ctx)) return 0; /* Find the first entry in TNC and save it */ @@ -389,7 +397,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) goto out; } - file->f_pos = key_hash_flash(c, &dent->key); + ctx->pos = key_hash_flash(c, &dent->key); file->private_data = dent; } @@ -397,17 +405,16 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) if (!dent) { /* * The directory was seek'ed to and is now readdir'ed. - * Find the entry corresponding to @file->f_pos or the - * closest one. + * Find the entry corresponding to @ctx->pos or the closest one. */ - dent_key_init_hash(c, &key, dir->i_ino, file->f_pos); + dent_key_init_hash(c, &key, dir->i_ino, ctx->pos); nm.name = NULL; dent = ubifs_tnc_next_ent(c, &key, &nm); if (IS_ERR(dent)) { err = PTR_ERR(dent); goto out; } - file->f_pos = key_hash_flash(c, &dent->key); + ctx->pos = key_hash_flash(c, &dent->key); file->private_data = dent; } @@ -419,10 +426,9 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) ubifs_inode(dir)->creat_sqnum); nm.len = le16_to_cpu(dent->nlen); - over = filldir(dirent, dent->name, nm.len, file->f_pos, + if (!dir_emit(ctx, dent->name, nm.len, le64_to_cpu(dent->inum), - vfs_dent_type(dent->type)); - if (over) + vfs_dent_type(dent->type))) return 0; /* Switch to the next entry */ @@ -435,7 +441,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) } kfree(file->private_data); - file->f_pos = key_hash_flash(c, &dent->key); + ctx->pos = key_hash_flash(c, &dent->key); file->private_data = dent; cond_resched(); } @@ -448,18 +454,11 @@ out: kfree(file->private_data); file->private_data = NULL; - file->f_pos = 2; + /* 2 is a special value indicating that there are no more direntries */ + ctx->pos = 2; return 0; } -/* If a directory is seeked, we have to free saved readdir() state */ -static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int whence) -{ - kfree(file->private_data); - file->private_data = NULL; - return generic_file_llseek(file, offset, whence); -} - /* Free saved readdir() state when the directory is closed */ static int ubifs_dir_release(struct inode *dir, struct file *file) { @@ -1177,10 +1176,10 @@ const struct inode_operations ubifs_dir_inode_operations = { }; const struct file_operations ubifs_dir_operations = { - .llseek = ubifs_dir_llseek, + .llseek = generic_file_llseek, .release = ubifs_dir_release, .read = generic_read_dir, - .readdir = ubifs_readdir, + .iterate = ubifs_readdir, .fsync = ubifs_fsync, .unlocked_ioctl = ubifs_ioctl, #ifdef CONFIG_COMPAT diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 14374530784c..123c79b7261e 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1277,13 +1277,14 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr) return err; } -static void ubifs_invalidatepage(struct page *page, unsigned long offset) +static void ubifs_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { struct inode *inode = page->mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; ubifs_assert(PagePrivate(page)); - if (offset) + if (offset || length < PAGE_CACHE_SIZE) /* Partial page remains dirty */ return; diff --git a/fs/udf/dir.c b/fs/udf/dir.c index b3e93f5e17c3..a012c51caffd 100644 --- a/fs/udf/dir.c +++ b/fs/udf/dir.c @@ -35,14 +35,16 @@ #include 
"udf_i.h" #include "udf_sb.h" -static int do_udf_readdir(struct inode *dir, struct file *filp, - filldir_t filldir, void *dirent) + +static int udf_readdir(struct file *file, struct dir_context *ctx) { + struct inode *dir = file_inode(file); + struct udf_inode_info *iinfo = UDF_I(dir); struct udf_fileident_bh fibh = { .sbh = NULL, .ebh = NULL}; struct fileIdentDesc *fi = NULL; struct fileIdentDesc cfi; int block, iblock; - loff_t nf_pos = (filp->f_pos - 1) << 2; + loff_t nf_pos; int flen; unsigned char *fname = NULL; unsigned char *nameptr; @@ -54,10 +56,14 @@ static int do_udf_readdir(struct inode *dir, struct file *filp, uint32_t elen; sector_t offset; int i, num, ret = 0; - unsigned int dt_type; struct extent_position epos = { NULL, 0, {0, 0} }; - struct udf_inode_info *iinfo; + if (ctx->pos == 0) { + if (!dir_emit_dot(file, ctx)) + return 0; + ctx->pos = 1; + } + nf_pos = (ctx->pos - 1) << 2; if (nf_pos >= size) goto out; @@ -71,7 +77,6 @@ static int do_udf_readdir(struct inode *dir, struct file *filp, nf_pos = udf_ext0_offset(dir); fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1); - iinfo = UDF_I(dir); if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits, &epos, &eloc, &elen, &offset) @@ -116,7 +121,9 @@ static int do_udf_readdir(struct inode *dir, struct file *filp, } while (nf_pos < size) { - filp->f_pos = (nf_pos >> 2) + 1; + struct kernel_lb_addr tloc; + + ctx->pos = (nf_pos >> 2) + 1; fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc, &elen, &offset); @@ -155,24 +162,22 @@ static int do_udf_readdir(struct inode *dir, struct file *filp, } if (cfi.fileCharacteristics & FID_FILE_CHAR_PARENT) { - iblock = parent_ino(filp->f_path.dentry); - flen = 2; - memcpy(fname, "..", flen); - dt_type = DT_DIR; - } else { - struct kernel_lb_addr tloc = lelb_to_cpu(cfi.icb.extLocation); - - iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0); - flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi); - dt_type = DT_UNKNOWN; + if (!dir_emit_dotdot(file, ctx)) + goto out; + continue; } - if (flen && filldir(dirent, fname, flen, filp->f_pos, - iblock, dt_type) < 0) + flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi); + if (!flen) + continue; + + tloc = lelb_to_cpu(cfi.icb.extLocation); + iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0); + if (!dir_emit(ctx, fname, flen, iblock, DT_UNKNOWN)) goto out; } /* end while */ - filp->f_pos = (nf_pos >> 2) + 1; + ctx->pos = (nf_pos >> 2) + 1; out: if (fibh.sbh != fibh.ebh) @@ -184,27 +189,11 @@ out: return ret; } -static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir) -{ - struct inode *dir = file_inode(filp); - int result; - - if (filp->f_pos == 0) { - if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) { - return 0; - } - filp->f_pos++; - } - - result = do_udf_readdir(dir, filp, filldir, dirent); - return result; -} - /* readdir and lookup functions */ const struct file_operations udf_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .readdir = udf_readdir, + .iterate = udf_readdir, .unlocked_ioctl = udf_ioctl, .fsync = generic_file_fsync, }; diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 102c072c6bbf..5f6fc17d6bc5 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -594,6 +594,29 @@ static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode, return 0; } +static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) +{ + struct inode *inode; + struct udf_inode_info 
*iinfo; + int err; + + inode = udf_new_inode(dir, mode, &err); + if (!inode) + return err; + + iinfo = UDF_I(inode); + if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) + inode->i_data.a_ops = &udf_adinicb_aops; + else + inode->i_data.a_ops = &udf_aops; + inode->i_op = &udf_file_inode_operations; + inode->i_fop = &udf_file_operations; + mark_inode_dirty(inode); + + d_tmpfile(dentry, inode); + return 0; +} + static int udf_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { @@ -1311,6 +1334,7 @@ const struct inode_operations udf_dir_inode_operations = { .rmdir = udf_rmdir, .mknod = udf_mknod, .rename = udf_rename, + .tmpfile = udf_tmpfile, }; const struct inode_operations udf_symlink_inode_operations = { .readlink = generic_readlink, diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c index 3a75ca09c506..0ecc2cebed8f 100644 --- a/fs/ufs/dir.c +++ b/fs/ufs/dir.c @@ -430,16 +430,16 @@ ufs_validate_entry(struct super_block *sb, char *base, * This is blatantly stolen from ext2fs */ static int -ufs_readdir(struct file *filp, void *dirent, filldir_t filldir) +ufs_readdir(struct file *file, struct dir_context *ctx) { - loff_t pos = filp->f_pos; - struct inode *inode = file_inode(filp); + loff_t pos = ctx->pos; + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; unsigned int offset = pos & ~PAGE_CACHE_MASK; unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned long npages = ufs_dir_pages(inode); unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1); - int need_revalidate = filp->f_version != inode->i_version; + int need_revalidate = file->f_version != inode->i_version; unsigned flags = UFS_SB(sb)->s_flags; UFSD("BEGIN\n"); @@ -457,16 +457,16 @@ ufs_readdir(struct file *filp, void *dirent, filldir_t filldir) ufs_error(sb, __func__, "bad page in #%lu", inode->i_ino); - filp->f_pos += PAGE_CACHE_SIZE - offset; + ctx->pos += PAGE_CACHE_SIZE - offset; return -EIO; } kaddr = page_address(page); if (unlikely(need_revalidate)) { if (offset) { offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask); - filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset; + ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset; } - filp->f_version = inode->i_version; + file->f_version = inode->i_version; need_revalidate = 0; } de = (struct ufs_dir_entry *)(kaddr+offset); @@ -479,11 +479,8 @@ ufs_readdir(struct file *filp, void *dirent, filldir_t filldir) return -EIO; } if (de->d_ino) { - int over; unsigned char d_type = DT_UNKNOWN; - offset = (char *)de - kaddr; - UFSD("filldir(%s,%u)\n", de->d_name, fs32_to_cpu(sb, de->d_ino)); UFSD("namlen %u\n", ufs_get_de_namlen(sb, de)); @@ -491,16 +488,15 @@ ufs_readdir(struct file *filp, void *dirent, filldir_t filldir) if ((flags & UFS_DE_MASK) == UFS_DE_44BSD) d_type = de->d_u.d_44.d_type; - over = filldir(dirent, de->d_name, + if (!dir_emit(ctx, de->d_name, ufs_get_de_namlen(sb, de), - (n<<PAGE_CACHE_SHIFT) | offset, - fs32_to_cpu(sb, de->d_ino), d_type); - if (over) { + fs32_to_cpu(sb, de->d_ino), + d_type)) { ufs_put_page(page); return 0; } } - filp->f_pos += fs16_to_cpu(sb, de->d_reclen); + ctx->pos += fs16_to_cpu(sb, de->d_reclen); } ufs_put_page(page); } @@ -660,7 +656,7 @@ not_empty: const struct file_operations ufs_dir_operations = { .read = generic_read_dir, - .readdir = ufs_readdir, + .iterate = ufs_readdir, .fsync = generic_file_fsync, .llseek = generic_file_llseek, }; diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 41a695048be7..596ec71da00e 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -843,10 +843,12 @@ 
xfs_cluster_write( STATIC void xfs_vm_invalidatepage( struct page *page, - unsigned long offset) + unsigned int offset, + unsigned int length) { - trace_xfs_invalidatepage(page->mapping->host, page, offset); - block_invalidatepage(page, offset); + trace_xfs_invalidatepage(page->mapping->host, page, offset, + length); + block_invalidatepage(page, offset, length); } /* @@ -910,7 +912,7 @@ next_buffer: xfs_iunlock(ip, XFS_ILOCK_EXCL); out_invalidate: - xfs_vm_invalidatepage(page, 0); + xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE); return; } @@ -940,7 +942,7 @@ xfs_vm_writepage( int count = 0; int nonblocking = 0; - trace_xfs_writepage(inode, page, 0); + trace_xfs_writepage(inode, page, 0, 0); ASSERT(page_has_buffers(page)); @@ -1171,7 +1173,7 @@ xfs_vm_releasepage( { int delalloc, unwritten; - trace_xfs_releasepage(page->mapping->host, page, 0); + trace_xfs_releasepage(page->mapping->host, page, 0, 0); xfs_count_page_state(page, &delalloc, &unwritten); diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h index f9d7846097e2..444a7704596c 100644 --- a/fs/xfs/xfs_attr_leaf.h +++ b/fs/xfs/xfs_attr_leaf.h @@ -128,6 +128,7 @@ struct xfs_attr3_leaf_hdr { __u8 holes; __u8 pad1; struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE]; + __be32 pad2; /* 64 bit alignment */ }; #define XFS_ATTR3_LEAF_CRC_OFF (offsetof(struct xfs_attr3_leaf_hdr, info.crc)) diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 8804b8a3c310..0903960410a2 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -2544,7 +2544,17 @@ xfs_btree_new_iroot( if (error) goto error0; + /* + * We can't just memcpy() the root in for CRC enabled btree blocks. + * In that case we also have to ensure the blkno remains correct. + */ memcpy(cblock, block, xfs_btree_block_len(cur)); + if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) { + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) + cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn); + else + cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn); + } be16_add_cpu(&block->bb_level, 1); xfs_btree_set_numrecs(block, 1); diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c index b26a50f9921d..8f023dee404d 100644 --- a/fs/xfs/xfs_dir2.c +++ b/fs/xfs/xfs_dir2.c @@ -368,10 +368,8 @@ xfs_dir_removename( int xfs_readdir( xfs_inode_t *dp, - void *dirent, - size_t bufsize, - xfs_off_t *offset, - filldir_t filldir) + struct dir_context *ctx, + size_t bufsize) { int rval; /* return value */ int v; /* type-checking value */ @@ -385,14 +383,13 @@ xfs_readdir( XFS_STATS_INC(xs_dir_getdents); if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) - rval = xfs_dir2_sf_getdents(dp, dirent, offset, filldir); + rval = xfs_dir2_sf_getdents(dp, ctx); else if ((rval = xfs_dir2_isblock(NULL, dp, &v))) ; else if (v) - rval = xfs_dir2_block_getdents(dp, dirent, offset, filldir); + rval = xfs_dir2_block_getdents(dp, ctx); else - rval = xfs_dir2_leaf_getdents(dp, dirent, bufsize, offset, - filldir); + rval = xfs_dir2_leaf_getdents(dp, ctx, bufsize); return rval; } diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index e59f5fc816fe..09aea0247d96 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c @@ -569,9 +569,7 @@ xfs_dir2_block_addname( int /* error */ xfs_dir2_block_getdents( xfs_inode_t *dp, /* incore inode */ - void *dirent, - xfs_off_t *offset, - filldir_t filldir) + struct dir_context *ctx) { xfs_dir2_data_hdr_t *hdr; /* block header */ struct xfs_buf *bp; /* buffer for block */ @@ -589,7 +587,7 @@ xfs_dir2_block_getdents( /* * If the block number in the offset is out of range, we're done. 
*/ - if (xfs_dir2_dataptr_to_db(mp, *offset) > mp->m_dirdatablk) + if (xfs_dir2_dataptr_to_db(mp, ctx->pos) > mp->m_dirdatablk) return 0; error = xfs_dir3_block_read(NULL, dp, &bp); @@ -600,7 +598,7 @@ xfs_dir2_block_getdents( * Extract the byte offset we start at from the seek pointer. * We'll skip entries before this. */ - wantoff = xfs_dir2_dataptr_to_off(mp, *offset); + wantoff = xfs_dir2_dataptr_to_off(mp, ctx->pos); hdr = bp->b_addr; xfs_dir3_data_check(dp, bp); /* @@ -639,13 +637,12 @@ xfs_dir2_block_getdents( cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, (char *)dep - (char *)hdr); + ctx->pos = cook & 0x7fffffff; /* * If it didn't fit, set the final offset to here & return. */ - if (filldir(dirent, (char *)dep->name, dep->namelen, - cook & 0x7fffffff, be64_to_cpu(dep->inumber), - DT_UNKNOWN)) { - *offset = cook & 0x7fffffff; + if (!dir_emit(ctx, (char *)dep->name, dep->namelen, + be64_to_cpu(dep->inumber), DT_UNKNOWN)) { xfs_trans_brelse(NULL, bp); return 0; } @@ -655,7 +652,7 @@ xfs_dir2_block_getdents( * Reached the end of the block. * Set the offset to a non-existent block 1 and return. */ - *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) & + ctx->pos = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) & 0x7fffffff; xfs_trans_brelse(NULL, bp); return 0; diff --git a/fs/xfs/xfs_dir2_format.h b/fs/xfs/xfs_dir2_format.h index 995f1f505a52..7826782b8d78 100644 --- a/fs/xfs/xfs_dir2_format.h +++ b/fs/xfs/xfs_dir2_format.h @@ -266,6 +266,7 @@ struct xfs_dir3_blk_hdr { struct xfs_dir3_data_hdr { struct xfs_dir3_blk_hdr hdr; xfs_dir2_data_free_t best_free[XFS_DIR2_DATA_FD_COUNT]; + __be32 pad; /* 64 bit alignment */ }; #define XFS_DIR3_DATA_CRC_OFF offsetof(struct xfs_dir3_data_hdr, hdr.crc) @@ -477,7 +478,7 @@ struct xfs_dir3_leaf_hdr { struct xfs_da3_blkinfo info; /* header for da routines */ __be16 count; /* count of entries */ __be16 stale; /* count of stale entries */ - __be32 pad; + __be32 pad; /* 64 bit alignment */ }; struct xfs_dir3_icleaf_hdr { @@ -715,7 +716,7 @@ struct xfs_dir3_free_hdr { __be32 firstdb; /* db of first entry */ __be32 nvalid; /* count of valid entries */ __be32 nused; /* count of used entries */ - __be32 pad; /* 64 bit alignment. */ + __be32 pad; /* 64 bit alignment */ }; struct xfs_dir3_free { diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index da71a1819d78..e0cc1243a8aa 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c @@ -1300,10 +1300,8 @@ out: int /* error */ xfs_dir2_leaf_getdents( xfs_inode_t *dp, /* incore directory inode */ - void *dirent, - size_t bufsize, - xfs_off_t *offset, - filldir_t filldir) + struct dir_context *ctx, + size_t bufsize) { struct xfs_buf *bp = NULL; /* data block buffer */ xfs_dir2_data_hdr_t *hdr; /* data block header */ @@ -1322,7 +1320,7 @@ xfs_dir2_leaf_getdents( * If the offset is at or past the largest allowed value, * give up right away. */ - if (*offset >= XFS_DIR2_MAX_DATAPTR) + if (ctx->pos >= XFS_DIR2_MAX_DATAPTR) return 0; mp = dp->i_mount; @@ -1343,7 +1341,7 @@ xfs_dir2_leaf_getdents( * Inside the loop we keep the main offset value as a byte offset * in the directory file. 
*/ - curoff = xfs_dir2_dataptr_to_byte(mp, *offset); + curoff = xfs_dir2_dataptr_to_byte(mp, ctx->pos); /* * Force this conversion through db so we truncate the offset @@ -1444,8 +1442,8 @@ xfs_dir2_leaf_getdents( dep = (xfs_dir2_data_entry_t *)ptr; length = xfs_dir2_data_entsize(dep->namelen); - if (filldir(dirent, (char *)dep->name, dep->namelen, - xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff, + ctx->pos = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff; + if (!dir_emit(ctx, (char *)dep->name, dep->namelen, be64_to_cpu(dep->inumber), DT_UNKNOWN)) break; @@ -1462,9 +1460,9 @@ xfs_dir2_leaf_getdents( * All done. Set output offset value to current offset. */ if (curoff > xfs_dir2_dataptr_to_byte(mp, XFS_DIR2_MAX_DATAPTR)) - *offset = XFS_DIR2_MAX_DATAPTR & 0x7fffffff; + ctx->pos = XFS_DIR2_MAX_DATAPTR & 0x7fffffff; else - *offset = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff; + ctx->pos = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff; kmem_free(map_info); if (bp) xfs_trans_brelse(NULL, bp); diff --git a/fs/xfs/xfs_dir2_priv.h b/fs/xfs/xfs_dir2_priv.h index 7cf573c88aad..0511cda4a712 100644 --- a/fs/xfs/xfs_dir2_priv.h +++ b/fs/xfs/xfs_dir2_priv.h @@ -33,8 +33,8 @@ extern int xfs_dir_cilookup_result(struct xfs_da_args *args, extern const struct xfs_buf_ops xfs_dir3_block_buf_ops; extern int xfs_dir2_block_addname(struct xfs_da_args *args); -extern int xfs_dir2_block_getdents(struct xfs_inode *dp, void *dirent, - xfs_off_t *offset, filldir_t filldir); +extern int xfs_dir2_block_getdents(struct xfs_inode *dp, + struct dir_context *ctx); extern int xfs_dir2_block_lookup(struct xfs_da_args *args); extern int xfs_dir2_block_removename(struct xfs_da_args *args); extern int xfs_dir2_block_replace(struct xfs_da_args *args); @@ -91,8 +91,8 @@ extern void xfs_dir3_leaf_compact(struct xfs_da_args *args, extern void xfs_dir3_leaf_compact_x1(struct xfs_dir3_icleaf_hdr *leafhdr, struct xfs_dir2_leaf_entry *ents, int *indexp, int *lowstalep, int *highstalep, int *lowlogp, int *highlogp); -extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, void *dirent, - size_t bufsize, xfs_off_t *offset, filldir_t filldir); +extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, struct dir_context *ctx, + size_t bufsize); extern int xfs_dir3_leaf_get_buf(struct xfs_da_args *args, xfs_dir2_db_t bno, struct xfs_buf **bpp, __uint16_t magic); extern void xfs_dir3_leaf_log_ents(struct xfs_trans *tp, struct xfs_buf *bp, @@ -153,8 +153,7 @@ extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_buf *bp, int size, xfs_dir2_sf_hdr_t *sfhp); extern int xfs_dir2_sf_addname(struct xfs_da_args *args); extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino); -extern int xfs_dir2_sf_getdents(struct xfs_inode *dp, void *dirent, - xfs_off_t *offset, filldir_t filldir); +extern int xfs_dir2_sf_getdents(struct xfs_inode *dp, struct dir_context *ctx); extern int xfs_dir2_sf_lookup(struct xfs_da_args *args); extern int xfs_dir2_sf_removename(struct xfs_da_args *args); extern int xfs_dir2_sf_replace(struct xfs_da_args *args); diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c index 6157424dbf8f..97676a347da1 100644 --- a/fs/xfs/xfs_dir2_sf.c +++ b/fs/xfs/xfs_dir2_sf.c @@ -768,9 +768,7 @@ xfs_dir2_sf_create( int /* error */ xfs_dir2_sf_getdents( xfs_inode_t *dp, /* incore directory inode */ - void *dirent, - xfs_off_t *offset, - filldir_t filldir) + struct dir_context *ctx) { int i; /* shortform entry number */ xfs_mount_t *mp; /* filesystem mount point */ @@ -802,7 +800,7 @@ 
xfs_dir2_sf_getdents( /* * If the block number in the offset is out of range, we're done. */ - if (xfs_dir2_dataptr_to_db(mp, *offset) > mp->m_dirdatablk) + if (xfs_dir2_dataptr_to_db(mp, ctx->pos) > mp->m_dirdatablk) return 0; /* @@ -819,22 +817,20 @@ xfs_dir2_sf_getdents( /* * Put . entry unless we're starting past it. */ - if (*offset <= dot_offset) { - if (filldir(dirent, ".", 1, dot_offset & 0x7fffffff, dp->i_ino, DT_DIR)) { - *offset = dot_offset & 0x7fffffff; + if (ctx->pos <= dot_offset) { + ctx->pos = dot_offset & 0x7fffffff; + if (!dir_emit(ctx, ".", 1, dp->i_ino, DT_DIR)) return 0; - } } /* * Put .. entry unless we're starting past it. */ - if (*offset <= dotdot_offset) { + if (ctx->pos <= dotdot_offset) { ino = xfs_dir2_sf_get_parent_ino(sfp); - if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) { - *offset = dotdot_offset & 0x7fffffff; + ctx->pos = dotdot_offset & 0x7fffffff; + if (!dir_emit(ctx, "..", 2, ino, DT_DIR)) return 0; - } } /* @@ -845,21 +841,20 @@ xfs_dir2_sf_getdents( off = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, xfs_dir2_sf_get_offset(sfep)); - if (*offset > off) { + if (ctx->pos > off) { sfep = xfs_dir2_sf_nextentry(sfp, sfep); continue; } ino = xfs_dir2_sfe_get_ino(sfp, sfep); - if (filldir(dirent, (char *)sfep->name, sfep->namelen, - off & 0x7fffffff, ino, DT_UNKNOWN)) { - *offset = off & 0x7fffffff; + ctx->pos = off & 0x7fffffff; + if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, + ino, DT_UNKNOWN)) return 0; - } sfep = xfs_dir2_sf_nextentry(sfp, sfep); } - *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) & + ctx->pos = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) & 0x7fffffff; return 0; } diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index a5f2042aec8b..de3dc98f4e8f 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -906,11 +906,10 @@ xfs_file_release( STATIC int xfs_file_readdir( - struct file *filp, - void *dirent, - filldir_t filldir) + struct file *file, + struct dir_context *ctx) { - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(file); xfs_inode_t *ip = XFS_I(inode); int error; size_t bufsize; @@ -929,8 +928,7 @@ xfs_file_readdir( */ bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size); - error = xfs_readdir(ip, dirent, bufsize, - (xfs_off_t *)&filp->f_pos, filldir); + error = xfs_readdir(ip, ctx, bufsize); if (error) return -error; return 0; @@ -1270,8 +1268,7 @@ xfs_seek_data( } out: - if (offset != file->f_pos) - file->f_pos = offset; + offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); out_unlock: xfs_iunlock_map_shared(ip, lock); @@ -1379,8 +1376,7 @@ out: * situation in particular. 
*/ offset = min_t(loff_t, offset, isize); - if (offset != file->f_pos) - file->f_pos = offset; + offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); out_unlock: xfs_iunlock_map_shared(ip, lock); @@ -1432,7 +1428,7 @@ const struct file_operations xfs_file_operations = { const struct file_operations xfs_dir_file_operations = { .open = xfs_dir_open, .read = generic_read_dir, - .readdir = xfs_file_readdir, + .iterate = xfs_file_readdir, .llseek = generic_file_llseek, .unlocked_ioctl = xfs_file_ioctl, #ifdef CONFIG_COMPAT diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 45a85ff84da1..7cf5e4eafe28 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -1845,7 +1845,13 @@ xlog_recover_do_inode_buffer( xfs_agino_t *buffer_nextp; trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f); - bp->b_ops = &xfs_inode_buf_ops; + + /* + * Post recovery validation only works properly on CRC enabled + * filesystems. + */ + if (xfs_sb_version_hascrc(&mp->m_sb)) + bp->b_ops = &xfs_inode_buf_ops; inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog; for (i = 0; i < inodes_per_buf; i++) { @@ -2205,7 +2211,16 @@ xlog_recover_do_reg_buffer( /* Shouldn't be any more regions */ ASSERT(i == item->ri_total); - xlog_recovery_validate_buf_type(mp, bp, buf_f); + /* + * We can only do post recovery validation on items on CRC enabled + * filesystems as we need to know when the buffer was written to be able + * to determine if we should have replayed the item. If we replay old + * metadata over a newer buffer, then it will enter a temporarily + * inconsistent state resulting in verification failures. Hence for now + * just avoid the verification stage for non-crc filesystems. + */ + if (xfs_sb_version_hascrc(&mp->m_sb)) + xlog_recovery_validate_buf_type(mp, bp, buf_f); } /* diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index f6bfbd734669..e8e310c05097 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -314,7 +314,8 @@ STATIC int xfs_mount_validate_sb( xfs_mount_t *mp, xfs_sb_t *sbp, - bool check_inprogress) + bool check_inprogress, + bool check_version) { /* @@ -337,9 +338,10 @@ xfs_mount_validate_sb( /* * Version 5 superblock feature mask validation. Reject combinations the - * kernel cannot support up front before checking anything else. + * kernel cannot support up front before checking anything else. For + * write validation, we don't need to check feature masks. */ - if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) { + if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) { xfs_alert(mp, "Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n" "Use of these features in this kernel is at your own risk!"); @@ -675,7 +677,8 @@ xfs_sb_to_disk( static int xfs_sb_verify( - struct xfs_buf *bp) + struct xfs_buf *bp, + bool check_version) { struct xfs_mount *mp = bp->b_target->bt_mount; struct xfs_sb sb; @@ -686,7 +689,8 @@ xfs_sb_verify( * Only check the in progress field for the primary superblock as * mkfs.xfs doesn't clear it from secondary superblocks. 
*/ - return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR); + return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR, + check_version); } /* @@ -719,7 +723,7 @@ xfs_sb_read_verify( goto out_error; } } - error = xfs_sb_verify(bp); + error = xfs_sb_verify(bp, true); out_error: if (error) { @@ -758,7 +762,7 @@ xfs_sb_write_verify( struct xfs_buf_log_item *bip = bp->b_fspriv; int error; - error = xfs_sb_verify(bp); + error = xfs_sb_verify(bp, false); if (error) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr); xfs_buf_ioerror(bp, error); diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index aa4db3307d36..a04701de6bbd 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -974,14 +974,16 @@ DEFINE_RW_EVENT(xfs_file_splice_read); DEFINE_RW_EVENT(xfs_file_splice_write); DECLARE_EVENT_CLASS(xfs_page_class, - TP_PROTO(struct inode *inode, struct page *page, unsigned long off), - TP_ARGS(inode, page, off), + TP_PROTO(struct inode *inode, struct page *page, unsigned long off, + unsigned int len), + TP_ARGS(inode, page, off, len), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(pgoff_t, pgoff) __field(loff_t, size) __field(unsigned long, offset) + __field(unsigned int, length) __field(int, delalloc) __field(int, unwritten) ), @@ -995,24 +997,27 @@ DECLARE_EVENT_CLASS(xfs_page_class, __entry->pgoff = page_offset(page); __entry->size = i_size_read(inode); __entry->offset = off; + __entry->length = len; __entry->delalloc = delalloc; __entry->unwritten = unwritten; ), TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " - "delalloc %d unwritten %d", + "length %x delalloc %d unwritten %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->pgoff, __entry->size, __entry->offset, + __entry->length, __entry->delalloc, __entry->unwritten) ) #define DEFINE_PAGE_EVENT(name) \ DEFINE_EVENT(xfs_page_class, name, \ - TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \ - TP_ARGS(inode, page, off)) + TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \ + unsigned int len), \ + TP_ARGS(inode, page, off, len)) DEFINE_PAGE_EVENT(xfs_writepage); DEFINE_PAGE_EVENT(xfs_releasepage); DEFINE_PAGE_EVENT(xfs_invalidatepage); diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index 5163022d9808..38c67c34d73f 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h @@ -31,8 +31,7 @@ int xfs_remove(struct xfs_inode *dp, struct xfs_name *name, struct xfs_inode *ip); int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip, struct xfs_name *target_name); -int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize, - xfs_off_t *offset, filldir_t filldir); +int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx, size_t bufsize); int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name, const char *target_path, umode_t mode, struct xfs_inode **ipp); int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state);
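
The per-filesystem conversions above all reduce to the same shape: the old ->readdir/filldir callback pair becomes a single ->iterate method driven by struct dir_context, the filesystem advances ctx->pos itself, and enumeration stops when dir_emit() returns false. A minimal sketch of that shape for a hypothetical myfs; struct myfs_dirent and the myfs_first()/myfs_next()/myfs_eof_pos() walker helpers are illustrative assumptions, not a real kernel API:

#include <linux/fs.h>

/* Illustrative in-memory view of one on-disk entry. */
struct myfs_dirent {
	const char	*d_name;
	int		d_namelen;
	u64		d_ino;
	loff_t		d_off;
};

/* Hypothetical walker over myfs's on-disk directory format. */
struct myfs_dirent *myfs_first(struct inode *dir, loff_t pos);
struct myfs_dirent *myfs_next(struct inode *dir, struct myfs_dirent *de);
loff_t myfs_eof_pos(struct inode *dir);

static int myfs_iterate(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file_inode(file);
	struct myfs_dirent *de;

	/* "." and ".." come from the generic helper, which honours ctx->pos. */
	if (!dir_emit_dots(file, ctx))
		return 0;

	for (de = myfs_first(dir, ctx->pos); de; de = myfs_next(dir, de)) {
		/* Publish this entry's position before emitting it, so a
		 * short getdents() buffer makes the next call restart here. */
		ctx->pos = de->d_off;
		if (!dir_emit(ctx, de->d_name, de->d_namelen,
			      de->d_ino, DT_UNKNOWN))
			return 0;	/* caller's buffer is full, not an error */
	}
	ctx->pos = myfs_eof_pos(dir);	/* mark end-of-directory */
	return 0;
}

const struct file_operations myfs_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= myfs_iterate,
};

Note the inverted convention: filldir returned non-zero to stop, while dir_emit() returns false to stop, which is why the hunks above negate the result (over = !dir_emit(...)).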
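
The other signature change threaded through this section widens ->invalidatepage from a bare offset to an (offset, length) pair, so a partial-page invalidation can be told apart from a full-page one; the 9p, ubifs and xfs hunks all make the same decision. A hedged sketch of that decision, where myfs_release_private() is a placeholder for whatever per-page private state the filesystem keeps:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical teardown of myfs's per-page private state. */
void myfs_release_private(struct page *page);

/*
 * Only a full-page invalidation may tear down private page state;
 * anything else leaves part of the page (possibly dirty) in place.
 */
static void myfs_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	if (offset || length < PAGE_CACHE_SIZE)
		return;		/* partial invalidation: keep the state */

	myfs_release_private(page);
}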
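
udf_tmpfile() above is one instance of the new ->tmpfile inode operation: the inode is allocated and initialised exactly as ->create would do, but it is handed to d_tmpfile() instead of being linked under a name, so the file starts life already unlinked. In outline, assuming a hypothetical myfs_new_inode() that returns a new inode or an ERR_PTR():

#include <linux/err.h>
#include <linux/fs.h>

/* Hypothetical inode allocator and file operation tables for myfs. */
struct inode *myfs_new_inode(struct inode *dir, umode_t mode);
extern const struct inode_operations myfs_file_inode_operations;
extern const struct file_operations myfs_file_operations;

static int myfs_tmpfile(struct inode *dir, struct dentry *dentry,
			umode_t mode)
{
	struct inode *inode = myfs_new_inode(dir, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &myfs_file_inode_operations;
	inode->i_fop = &myfs_file_operations;
	mark_inode_dirty(inode);

	d_tmpfile(dentry, inode);	/* instantiate without a directory entry */
	return 0;
}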
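
Finally, the xfs_seek_data()/xfs_seek_hole() hunks replace open-coded f_pos assignments with vfs_setpos(), which rejects out-of-range offsets and clears f_version when the position moves — the same f_version side effect the ubifs hunk above keys off. A sketch of a SEEK_DATA handler built on it; myfs_find_data() is an assumed helper returning the next data offset or a negative errno:

#include <linux/fs.h>

/* Hypothetical lookup for the start of the next data extent. */
loff_t myfs_find_data(struct inode *inode, loff_t offset);

static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);

	if (whence != SEEK_DATA)
		return generic_file_llseek(file, offset, whence);

	offset = myfs_find_data(inode, offset);
	if (offset < 0)
		return offset;

	/* Validates against s_maxbytes and publishes f_pos for us. */
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}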