Diffstat (limited to 'fs/nfs')
-rw-r--r--  fs/nfs/callback.c   | 18
-rw-r--r--  fs/nfs/dir.c        |  8
-rw-r--r--  fs/nfs/getroot.c    |  4
-rw-r--r--  fs/nfs/namespace.c  | 29
-rw-r--r--  fs/nfs/nfs4proc.c   |  8
-rw-r--r--  fs/nfs/nfs4state.c  |  4
-rw-r--r--  fs/nfs/read.c       | 10
-rw-r--r--  fs/nfs/super.c      |  4
-rw-r--r--  fs/nfs/write.c      | 24
9 files changed, 66 insertions, 43 deletions
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index bd185a572a23..ecc06c619494 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -105,7 +105,7 @@ static void nfs_callback_svc(struct svc_rqst *rqstp)
  */
 int nfs_callback_up(void)
 {
-	struct svc_serv *serv;
+	struct svc_serv *serv = NULL;
 	int ret = 0;
 
 	lock_kernel();
@@ -122,24 +122,30 @@ int nfs_callback_up(void)
 	ret = svc_create_xprt(serv, "tcp", nfs_callback_set_tcpport,
 			      SVC_SOCK_ANONYMOUS);
 	if (ret <= 0)
-		goto out_destroy;
+		goto out_err;
 	nfs_callback_tcpport = ret;
 	dprintk("Callback port = 0x%x\n", nfs_callback_tcpport);
 
 	ret = svc_create_thread(nfs_callback_svc, serv);
 	if (ret < 0)
-		goto out_destroy;
+		goto out_err;
 	nfs_callback_info.serv = serv;
 	wait_for_completion(&nfs_callback_info.started);
 out:
+	/*
+	 * svc_create creates the svc_serv with sv_nrthreads == 1, and then
+	 * svc_create_thread increments that. So we need to call svc_destroy
+	 * on both success and failure so that the refcount is 1 when the
+	 * thread exits.
+	 */
+	if (serv)
+		svc_destroy(serv);
 	mutex_unlock(&nfs_callback_mutex);
 	unlock_kernel();
 	return ret;
-out_destroy:
+out_err:
 	dprintk("Couldn't create callback socket or server thread; err = %d\n",
 		ret);
-	svc_destroy(serv);
-out_err:
 	nfs_callback_info.users--;
 	goto out;
 }
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 476cb0f837fd..ae04892a5e5d 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -154,7 +154,6 @@ typedef struct {
 	struct nfs_entry *entry;
 	decode_dirent_t	decode;
 	int		plus;
-	int		error;
 	unsigned long	timestamp;
 	int		timestamp_valid;
 } nfs_readdir_descriptor_t;
@@ -213,7 +212,6 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
 	return 0;
  error:
 	unlock_page(page);
-	desc->error = error;
 	return -EIO;
 }
 
@@ -483,13 +481,13 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
 		goto out;
 	}
 	timestamp = jiffies;
-	desc->error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, *desc->dir_cookie,
-						page,
+	status = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred,
+					   *desc->dir_cookie, page,
 						NFS_SERVER(inode)->dtsize,
 						desc->plus);
 	desc->page = page;
 	desc->ptr = kmap(page);		/* matching kunmap in nfs_do_filldir */
-	if (desc->error >= 0) {
+	if (status >= 0) {
 		desc->timestamp = timestamp;
 		desc->timestamp_valid = 1;
 		if ((status = dir_decode(desc)) == 0)
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index e6242cdbaf91..fae97196daad 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -96,7 +96,7 @@ struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh)
 	inode = nfs_fhget(sb, mntfh, fsinfo.fattr);
 	if (IS_ERR(inode)) {
 		dprintk("nfs_get_root: get root inode failed\n");
-		return ERR_PTR(PTR_ERR(inode));
+		return ERR_CAST(inode);
 	}
 
 	error = nfs_superblock_set_dummy_root(sb, inode);
@@ -266,7 +266,7 @@ struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh)
 	inode = nfs_fhget(sb, mntfh, &fattr);
 	if (IS_ERR(inode)) {
 		dprintk("nfs_get_root: get root inode failed\n");
-		return ERR_PTR(PTR_ERR(inode));
+		return ERR_CAST(inode);
 	}
 
 	error = nfs_superblock_set_dummy_root(sb, inode);
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index be4ce1c3a3d8..607f6eb9cdb5 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -107,38 +107,40 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
 	BUG_ON(IS_ROOT(dentry));
 	dprintk("%s: enter\n", __FUNCTION__);
-	dput(nd->dentry);
-	nd->dentry = dget(dentry);
+	dput(nd->path.dentry);
+	nd->path.dentry = dget(dentry);
 
 	/* Look it up again */
-	parent = dget_parent(nd->dentry);
+	parent = dget_parent(nd->path.dentry);
 	err = server->nfs_client->rpc_ops->lookup(parent->d_inode,
-						  &nd->dentry->d_name,
+						  &nd->path.dentry->d_name,
 						  &fh, &fattr);
 	dput(parent);
 	if (err != 0)
 		goto out_err;
 
 	if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL)
-		mnt = nfs_do_refmount(nd->mnt, nd->dentry);
+		mnt = nfs_do_refmount(nd->path.mnt, nd->path.dentry);
 	else
-		mnt = nfs_do_submount(nd->mnt, nd->dentry, &fh, &fattr);
+		mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, &fh,
+				      &fattr);
 	err = PTR_ERR(mnt);
 	if (IS_ERR(mnt))
 		goto out_err;
 
 	mntget(mnt);
-	err = do_add_mount(mnt, nd, nd->mnt->mnt_flags|MNT_SHRINKABLE, &nfs_automount_list);
+	err = do_add_mount(mnt, nd, nd->path.mnt->mnt_flags|MNT_SHRINKABLE,
+			   &nfs_automount_list);
 	if (err < 0) {
 		mntput(mnt);
 		if (err == -EBUSY)
 			goto out_follow;
 		goto out_err;
 	}
-	mntput(nd->mnt);
-	dput(nd->dentry);
-	nd->mnt = mnt;
-	nd->dentry = dget(mnt->mnt_root);
+	mntput(nd->path.mnt);
+	dput(nd->path.dentry);
+	nd->path.mnt = mnt;
+	nd->path.dentry = dget(mnt->mnt_root);
 	schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
 out:
 	dprintk("%s: done, returned %d\n", __FUNCTION__, err);
@@ -146,10 +148,11 @@ out:
 	dprintk("<-- nfs_follow_mountpoint() = %d\n", err);
 	return ERR_PTR(err);
 out_err:
-	path_release(nd);
+	path_put(&nd->path);
 	goto out;
 out_follow:
-	while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
+	while (d_mountpoint(nd->path.dentry) &&
+	       follow_down(&nd->path.mnt, &nd->path.dentry))
 		;
 	err = 0;
 	goto out;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 027e1095256e..7ce07862c2fb 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1384,11 +1384,11 @@ out_close:
 struct dentry *
 nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 {
-	struct dentry *parent;
 	struct path path = {
-		.mnt = nd->mnt,
+		.mnt = nd->path.mnt,
 		.dentry = dentry,
 	};
+	struct dentry *parent;
 	struct iattr attr;
 	struct rpc_cred *cred;
 	struct nfs4_state *state;
@@ -1433,7 +1433,7 @@ int
 nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd)
 {
 	struct path path = {
-		.mnt = nd->mnt,
+		.mnt = nd->path.mnt,
 		.dentry = dentry,
 	};
 	struct rpc_cred *cred;
@@ -1885,7 +1885,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
 		 int flags, struct nameidata *nd)
 {
 	struct path path = {
-		.mnt = nd->mnt,
+		.mnt = nd->path.mnt,
 		.dentry = dentry,
 	};
 	struct nfs4_state *state;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index f9c7432471dc..6233eb5e98c1 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -682,8 +682,8 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
 			if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
 				return;
 			printk(KERN_WARNING "NFS: v4 server returned a bad"
-					"sequence-id error on an"
-					"unconfirmed sequence %p!\n",
+					" sequence-id error on an"
+					" unconfirmed sequence %p!\n",
 					seqid->sequence);
 		case -NFS4ERR_STALE_CLIENTID:
 		case -NFS4ERR_STALE_STATEID:
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 8fd6dfbe1bc3..3d7d9631e125 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -79,7 +79,7 @@ void nfs_readdata_release(void *data)
 static
 int nfs_return_empty_page(struct page *page)
 {
-	zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+	zero_user(page, 0, PAGE_CACHE_SIZE);
 	SetPageUptodate(page);
 	unlock_page(page);
 	return 0;
@@ -103,10 +103,10 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
 		pglen = PAGE_CACHE_SIZE - base;
 		for (;;) {
 			if (remainder <= pglen) {
-				zero_user_page(*pages, base, remainder, KM_USER0);
+				zero_user(*pages, base, remainder);
 				break;
 			}
-			zero_user_page(*pages, base, pglen, KM_USER0);
+			zero_user(*pages, base, pglen);
 			pages++;
 			remainder -= pglen;
 			pglen = PAGE_CACHE_SIZE;
@@ -130,7 +130,7 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
 		return PTR_ERR(new);
 	}
 	if (len < PAGE_CACHE_SIZE)
-		zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
+		zero_user_segment(page, len, PAGE_CACHE_SIZE);
 
 	nfs_list_add_request(new, &one_request);
 	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
@@ -532,7 +532,7 @@ readpage_async_filler(void *data, struct page *page)
 		goto out_error;
 
 	if (len < PAGE_CACHE_SIZE)
-		zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
+		zero_user_segment(page, len, PAGE_CACHE_SIZE);
 	nfs_pageio_add_request(desc->pgio, new);
 	return 0;
 out_error:
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 7f4505f6ac6f..1fb381843650 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -190,6 +190,10 @@ static match_table_t nfs_secflavor_tokens = {
 	{ Opt_sec_lkeyi, "lkeyi" },
 	{ Opt_sec_lkeyp, "lkeyp" },
 
+	{ Opt_sec_spkm, "spkm3" },
+	{ Opt_sec_spkmi, "spkm3i" },
+	{ Opt_sec_spkmp, "spkm3p" },
+
 	{ Opt_sec_err, NULL }
 };
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 522efff3e2c5..f55c437124a2 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -665,9 +665,7 @@ zero_page:
 	 * then we need to zero any uninitalised data. */
 	if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE &&
 			!PageUptodate(req->wb_page))
-		zero_user_page(req->wb_page, req->wb_bytes,
-				PAGE_CACHE_SIZE - req->wb_bytes,
-				KM_USER0);
+		zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE);
 	return req;
 }
 
@@ -699,6 +697,17 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 }
 
 /*
+ * If the page cache is marked as unsafe or invalid, then we can't rely on
+ * the PageUptodate() flag. In this case, we will need to turn off
+ * write optimisations that depend on the page contents being correct.
+ */
+static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
+{
+	return PageUptodate(page) &&
+		!(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
+}
+
+/*
  * Update and possibly write a cached page of an NFS file.
  *
  * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
@@ -719,10 +728,13 @@ int nfs_updatepage(struct file *file, struct page *page,
 		(long long)(page_offset(page) +offset));
 
 	/* If we're not using byte range locks, and we know the page
-	 * is entirely in cache, it may be more efficient to avoid
-	 * fragmenting write requests.
+	 * is up to date, it may be more efficient to extend the write
+	 * to cover the entire page in order to avoid fragmentation
+	 * inefficiencies.
 	 */
-	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
+	if (nfs_write_pageuptodate(page, inode) &&
+			inode->i_flock == NULL &&
+			!(file->f_mode & O_SYNC)) {
 		count = max(count + offset, nfs_page_length(page));
 		offset = 0;
 	}
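The callback.c hunks change nfs_callback_up() so that the svc_serv reference taken by svc_create() is always dropped before returning, whether or not svc_create_thread() succeeded. Below is a minimal userspace sketch of that init-or-unwind refcount pattern; struct server and its helpers are hypothetical stand-ins for the sunrpc svc API, not the kernel code itself.

/*
 * Userspace sketch (hypothetical names) of the refcount pattern used in
 * nfs_callback_up(): the constructor hands back one reference, starting
 * the worker takes a second one, and the setup path drops its own
 * reference on both success and failure.
 */
#include <stdio.h>
#include <stdlib.h>

struct server {
	int refcount;
};

static struct server *worker_serv;	/* models nfs_callback_info.serv */

static struct server *server_create(void)
{
	struct server *s = malloc(sizeof(*s));

	if (s)
		s->refcount = 1;	/* reference owned by the setup code */
	return s;
}

static void server_put(struct server *s)
{
	if (--s->refcount == 0) {
		printf("freeing server\n");
		free(s);
	}
}

static int server_start_worker(struct server *s)
{
	s->refcount++;			/* reference owned by the worker */
	return 0;			/* pretend the thread started */
}

static int server_up(void)
{
	struct server *serv = NULL;
	int ret = 0;

	serv = server_create();			/* refcount == 1 */
	if (serv == NULL) {
		ret = -1;
		goto out;
	}
	ret = server_start_worker(serv);	/* refcount == 2 on success */
	if (ret < 0)
		goto out;
	worker_serv = serv;
out:
	/*
	 * Drop the setup reference whether or not the worker was started,
	 * mirroring the unconditional svc_destroy() call in the patch; the
	 * NULL check covers the case where creation itself failed.
	 */
	if (serv)
		server_put(serv);
	return ret;
}

int main(void)
{
	if (server_up() < 0)
		return EXIT_FAILURE;
	server_put(worker_serv);	/* simulate the worker exiting */
	return EXIT_SUCCESS;
}

On success the worker's reference is the only one left; on any failure the setup reference is the last one and the object is freed on the spot.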
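The getroot.c hunks replace ERR_PTR(PTR_ERR(inode)) with ERR_CAST(inode) when propagating an error pointer under a different pointer type. Below is a userspace sketch of the err.h-style helpers, written as simplified re-implementations for illustration (the real definitions live in include/linux/err.h and differ in detail):

#include <stdio.h>

/* Simplified userspace copies of the kernel's pointer-error helpers. */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/*
 * ERR_CAST() re-types an error pointer without the pointer -> long ->
 * pointer round trip that ERR_PTR(PTR_ERR(x)) used to spell out by hand.
 */
static inline void *ERR_CAST(const void *ptr)
{
	return (void *)ptr;
}

struct inode;	/* opaque; just gives the cast something to work on */

static void *get_root(struct inode *inode)
{
	if (IS_ERR(inode))
		return ERR_CAST(inode);	/* was: ERR_PTR(PTR_ERR(inode)) */
	return inode;
}

int main(void)
{
	void *err = get_root(ERR_PTR(-2 /* ENOENT */));

	printf("propagated error %ld\n", PTR_ERR(err));
	return 0;
}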
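The namespace.c and nfs4proc.c hunks track the VFS change that embeds a struct path (a vfsmount/dentry pair) in struct nameidata, so nd->mnt and nd->dentry become nd->path.mnt and nd->path.dentry, and path_release(nd) becomes path_put(&nd->path). A cut-down sketch of that layout, with stand-in structs rather than the real VFS definitions:

#include <stdio.h>

struct vfsmount { int id; };
struct dentry   { const char *name; };

/* The mnt/dentry pair the VFS now passes around as one unit. */
struct path {
	struct vfsmount *mnt;
	struct dentry *dentry;
};

/* Cut-down nameidata: the old separate mnt/dentry fields are gone. */
struct nameidata {
	struct path path;
};

/* Stand-in for path_put(): releases both halves of the pair at once. */
static void path_put(struct path *path)
{
	printf("put mnt %d, dentry %s\n", path->mnt->id, path->dentry->name);
}

int main(void)
{
	struct vfsmount mnt = { .id = 1 };
	struct dentry dentry = { .name = "root" };
	struct nameidata nd = { .path = { .mnt = &mnt, .dentry = &dentry } };

	/* old: nd->dentry, nd->mnt; new: nd->path.dentry, nd->path.mnt */
	printf("looking at %s\n", nd.path.dentry->name);
	path_put(&nd.path);	/* old: path_release(nd) */
	return 0;
}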
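The read.c and write.c hunks convert zero_user_page(page, offset, len, KM_USER0) to zero_user(page, offset, len) and zero_user_segment(page, start, end), dropping the explicit kmap slot and, in the segment variant, switching from an (offset, length) pair to a half-open [start, end) range. The sketch below models only that calling convention on a plain userspace buffer; the function bodies are memset() stand-ins, not the kernel implementations:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static unsigned char page[PAGE_SIZE];

/* Model of zero_user(): zero `len` bytes starting at `offset`. */
static void zero_user(unsigned char *p, size_t offset, size_t len)
{
	memset(p + offset, 0, len);
}

/* Model of zero_user_segment(): zero the half-open range [start, end). */
static void zero_user_segment(unsigned char *p, size_t start, size_t end)
{
	memset(p + start, 0, end - start);
}

int main(void)
{
	size_t len = 1000;	/* bytes of valid data read from the server */

	memset(page, 0xff, sizeof(page));

	/* Old call: zero_user_page(page, len, PAGE_SIZE - len, KM_USER0);
	 * the new call zeroes the tail of the page past the valid data. */
	zero_user_segment(page, len, PAGE_SIZE);

	/* zero_user() keeps the (offset, length) convention instead. */
	zero_user(page, 0, len);

	printf("page[0]=%d page[%zu]=%d\n", page[0], len, page[len]);
	return 0;
}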
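The final write.c hunks gate the "extend a partial write to the whole page" optimisation on the new nfs_write_pageuptodate() helper, which refuses the optimisation whenever NFS_INO_REVAL_PAGECACHE or NFS_INO_INVALID_DATA is set on the inode. The sketch below restates that decision with stand-in types: the flag names are taken from the hunk, while the numeric values and the fake_* structures are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

#define NFS_INO_REVAL_PAGECACHE	0x01	/* values are illustrative only */
#define NFS_INO_INVALID_DATA	0x02

struct fake_inode {
	unsigned long cache_validity;
	bool has_byte_range_locks;	/* stands in for inode->i_flock */
};

struct fake_page {
	bool uptodate;			/* stands in for PageUptodate() */
};

/* Mirror of the logic added as nfs_write_pageuptodate() in the patch. */
static bool write_pageuptodate(struct fake_page *page, struct fake_inode *inode)
{
	return page->uptodate &&
	       !(inode->cache_validity &
		 (NFS_INO_REVAL_PAGECACHE | NFS_INO_INVALID_DATA));
}

/* Decide whether a partial-page write may be widened to the whole page. */
static bool may_extend_write(struct fake_page *page, struct fake_inode *inode,
			     bool o_sync)
{
	return write_pageuptodate(page, inode) &&
	       !inode->has_byte_range_locks && !o_sync;
}

int main(void)
{
	struct fake_inode inode = { .cache_validity = NFS_INO_INVALID_DATA };
	struct fake_page page = { .uptodate = true };

	printf("extend with invalid cache: %d\n",
	       may_extend_write(&page, &inode, false));
	inode.cache_validity = 0;
	printf("extend with clean cache:   %d\n",
	       may_extend_write(&page, &inode, false));
	return 0;
}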