author | Christoph Hellwig <hch@infradead.org> | 2013-11-14 20:50:30 +0400
committer | Trond Myklebust <trond.myklebust@primarydata.com> | 2014-01-14 02:29:49 +0400
commit | 1f90ee27461e31a1c18e5d819f6ea6f5c7304b16 (patch)
tree | 96a85b91576188ed1921c672d96adabe3a025bfa
parent | 2a009ec98cce440c0992fc9a2353e96cdb0b048b (diff)
download | linux-1f90ee27461e31a1c18e5d819f6ea6f5c7304b16.tar.xz
nfs: increment i_dio_count for reads, too
i_dio_count is used to protect dio access against truncate. We want
to make sure there are no dio reads pending either when doing a
truncate. I suspect that on plain NFS things might work even without
this, but once we use a pnfs layout driver that accesses backing devices
directly, things will go bad without the proper synchronization.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
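
For context on the locking scheme this patch extends: truncate waits for all in-flight direct I/O with inode_dio_wait(), which blocks until i_dio_count drops to zero, and each completed direct I/O calls inode_dio_done() to drop its reference. The sketch below is illustrative only, not code from this patch; the helper names start_dio_read()/finish_dio_read() are made up, and it assumes the i_dio_count API as it existed around Linux 3.13.

#include <linux/fs.h>		/* struct inode, inode_dio_wait(), inode_dio_done() */
#include <linux/atomic.h>	/* atomic_inc() */

/* Hypothetical helpers showing the read-side pairing the patch adds. */
static void start_dio_read(struct inode *inode)
{
	/* Announce a pending direct read so truncate will wait for it. */
	atomic_inc(&inode->i_dio_count);
}

static void finish_dio_read(struct inode *inode)
{
	/* Drop the reference and wake anyone sleeping in inode_dio_wait(). */
	inode_dio_done(inode);
}

/* Truncate side (e.g. the setattr path): block until no direct I/O is in flight. */
static void wait_for_direct_io_before_truncate(struct inode *inode)
{
	inode_dio_wait(inode);
	/* ... safe to shrink i_size and free blocks here ... */
}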
-rw-r--r-- | fs/nfs/direct.c | 9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 75ed2a90b0f2..6c232107e835 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -235,10 +235,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
 		spin_unlock(&inode->i_lock);
 	}
 
-	if (write) {
+	if (write)
 		nfs_zap_mapping(inode, inode->i_mapping);
-		inode_dio_done(inode);
-	}
+
+	inode_dio_done(inode);
 
 	if (dreq->iocb) {
 		long res = (long) dreq->error;
@@ -419,6 +419,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 					      loff_t pos, bool uio)
 {
 	struct nfs_pageio_descriptor desc;
+	struct inode *inode = dreq->inode;
 	ssize_t result = -EINVAL;
 	size_t requested_bytes = 0;
 	unsigned long seg;
@@ -427,6 +428,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 			     &nfs_direct_read_completion_ops);
 	get_dreq(dreq);
 	desc.pg_dreq = dreq;
+	atomic_inc(&inode->i_dio_count);
 
 	for (seg = 0; seg < nr_segs; seg++) {
 		const struct iovec *vec = &iov[seg];
@@ -446,6 +448,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 	 * generic layer handle the completion.
 	 */
 	if (requested_bytes == 0) {
+		inode_dio_done(inode);
 		nfs_direct_req_release(dreq);
 		return result < 0 ? result : -EIO;
 	}
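
A note on the error path in the last hunk: once the submit path has taken the i_dio_count reference, it must drop it exactly once, either immediately when nothing could be queued or later from the completion path. The fragment below is a simplified, hypothetical sketch of that balance; submit_direct_read() is not a real NFS function.

/* Simplified sketch of the reference balance (hypothetical, not fs/nfs/direct.c). */
static ssize_t submit_direct_read(struct inode *inode, size_t requested_bytes)
{
	atomic_inc(&inode->i_dio_count);	/* taken before queueing any requests */

	if (requested_bytes == 0) {
		/* Nothing was queued, so no completion will run: drop it here. */
		inode_dio_done(inode);
		return -EIO;
	}

	/* Otherwise the completion path (nfs_direct_complete) drops it. */
	return requested_bytes;
}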