 drivers/block/xen-blkfront.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 31c8a643d109..76b874a79175 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -286,8 +286,18 @@ static int blkif_queue_request(struct request *req)
 	ring_req->operation = rq_data_dir(req) ?
 		BLKIF_OP_WRITE : BLKIF_OP_READ;
-	if (req->cmd_flags & REQ_FLUSH)
+
+	if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+		/*
+		 * Ideally we could just do an unordered
+		 * flush-to-disk, but all we have is a full write
+		 * barrier at the moment. However, a barrier write is
+		 * a superset of FUA, so we can implement it the same
+		 * way. (It's also a FLUSH+FUA, since it is
+		 * guaranteed ordered WRT previous writes.)
+		 */
 		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+	}
 	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
 	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
@@ -1065,7 +1075,7 @@ static void blkfront_connect(struct blkfront_info *info)
 	info->feature_flush = 0;
 	if (!err && barrier)
-		info->feature_flush = REQ_FLUSH;
+		info->feature_flush = REQ_FLUSH | REQ_FUA;
 	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
 	if (err) {
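
For illustration, here is a minimal stand-alone sketch of the flag-to-operation mapping the first hunk introduces. The REQ_* bits and BLKIF_OP_* values below are simplified local stand-ins rather than the kernel's definitions, and pick_op() is a hypothetical helper that only mirrors the logic of blkif_queue_request(); it is not part of the patch.

/* Simplified stand-ins for the kernel request flags and ring operations. */
#include <stdio.h>

#define REQ_FLUSH (1u << 0) /* flush the backend's write cache          */
#define REQ_FUA   (1u << 1) /* make this write durable before completing */

enum blkif_op { BLKIF_OP_READ, BLKIF_OP_WRITE, BLKIF_OP_WRITE_BARRIER };

/*
 * Hypothetical helper mirroring the hunk above: a barrier write is
 * ordered with respect to earlier writes and durable when it completes,
 * so it covers both REQ_FLUSH and REQ_FUA.
 */
static enum blkif_op pick_op(int is_write, unsigned int cmd_flags)
{
	enum blkif_op op = is_write ? BLKIF_OP_WRITE : BLKIF_OP_READ;

	if (cmd_flags & (REQ_FLUSH | REQ_FUA))
		op = BLKIF_OP_WRITE_BARRIER;

	return op;
}

int main(void)
{
	printf("plain write -> %d\n", pick_op(1, 0));
	printf("FUA write   -> %d\n", pick_op(1, REQ_FUA));
	printf("flush       -> %d\n", pick_op(1, REQ_FLUSH));
	return 0;
}

The same reasoning explains the second hunk: once the barrier operation stands in for both semantics, the frontend can advertise REQ_FLUSH | REQ_FUA in feature_flush to the block layer.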