author    | Dan Williams <dan.j.williams@intel.com> | 2016-06-02 07:03:32 +0300
committer | Dan Williams <dan.j.williams@intel.com> | 2016-07-13 01:13:48 +0300
commit    | 14df6a4e7eba62442aed70dfd5bbd76d2e491d50 (patch)
tree      | 953bed6c450e5806be374b32a6b774a4b4045f1b /fs/dax.c
parent    | 476f848aaee466fd5d74f123fa652e757f2baeba (diff)
download  | linux-14df6a4e7eba62442aed70dfd5bbd76d2e491d50.tar.xz
fs/dax: remove wmb_pmem()
Flushing posted-write queues is now deferred to REQ_FLUSH context, or
otherwise handled by an ADR (Asynchronous DRAM Refresh) event at the
platform level.
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
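
[Editor's note: the sketch below is not part of the commit. It is a minimal,
hypothetical userspace illustration of the durability contract the message
describes; the path /mnt/pmem/data and the DAX mount are assumptions. Once
wmb_pmem() is gone from the write path, an application's explicit fsync() is
what provides the REQ_FLUSH context that drains posted writes.]

/*
 * Hypothetical illustration only; not from the patch. Assumes a
 * DAX-capable filesystem mounted at /mnt/pmem, backed by a pmem device.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	const char buf[] = "hello, pmem";
	int fd = open("/mnt/pmem/data", O_CREAT | O_RDWR, 0644);

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		perror("write");
		close(fd);
		return EXIT_FAILURE;
	}
	/*
	 * With wmb_pmem() removed from the dax_io() write path, the data
	 * copied above is not yet guaranteed durable. This fsync() supplies
	 * the REQ_FLUSH context the commit message refers to: it writes back
	 * dirty CPU cache lines for the file and issues a block-layer flush
	 * so the pmem driver can drain posted writes, unless the platform's
	 * ADR already covers them.
	 */
	if (fsync(fd) < 0) {
		perror("fsync");
		close(fd);
		return EXIT_FAILURE;
	}
	close(fd);
	return EXIT_SUCCESS;
}

The same contract applies to mmap()-based access: stores through a DAX
mapping need msync() or fsync() before they can be considered persistent,
which is the dax_writeback_mapping_range() path touched below.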
Diffstat (limited to 'fs/dax.c')
-rw-r--r-- | fs/dax.c | 7
1 file changed, 1 insertion(+), 6 deletions(-)
@@ -147,7 +147,7 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
                       struct buffer_head *bh)
 {
         loff_t pos = start, max = start, bh_max = start;
-        bool hole = false, need_wmb = false;
+        bool hole = false;
         struct block_device *bdev = NULL;
         int rw = iov_iter_rw(iter), rc;
         long map_len = 0;
@@ -213,7 +213,6 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
                 if (iov_iter_rw(iter) == WRITE) {
                         len = copy_from_iter_pmem(dax.addr,
                                         max - pos, iter);
-                        need_wmb = true;
                 } else if (!hole)
                         len = copy_to_iter((void __force *) dax.addr,
                                         max - pos, iter);
@@ -230,8 +229,6 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
                         dax.addr += len;
         }
 
-        if (need_wmb)
-                wmb_pmem();
         dax_unmap_atomic(bdev, &dax);
 
         return (pos == start) ? rc : pos - start;
@@ -783,7 +780,6 @@ int dax_writeback_mapping_range(struct address_space *mapping,
                         return ret;
                 }
         }
-        wmb_pmem();
         return 0;
 }
 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
@@ -1227,7 +1223,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
                 if (dax_map_atomic(bdev, &dax) < 0)
                         return PTR_ERR(dax.addr);
                 clear_pmem(dax.addr + offset, length);
-                wmb_pmem();
                 dax_unmap_atomic(bdev, &dax);
         }
         return 0;