Diffstat (limited to 'drivers/block/xen-blkfront.c'):

 drivers/block/xen-blkfront.c | 117 +++++++++++++++++++++++------------------------
 1 file changed, 59 insertions(+), 58 deletions(-)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index fcc5b4e0aef2..be4fea6a5dd3 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -196,6 +196,7 @@ struct blkfront_info
unsigned int nr_ring_pages;
struct request_queue *rq;
unsigned int feature_flush;
+ unsigned int feature_fua;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
unsigned int discard_granularity;
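
The new feature_fua field splits FUA support out of feature_flush, which until now carried the REQ_FLUSH|REQ_FUA request-flag bits as a capability mask. A minimal before/after sketch of the intent (comments ours, not from the source):

    /* Before: one field doubled as a bitmask of request flags. */
    unsigned int feature_flush;     /* held REQ_FLUSH | REQ_FUA */

    /* After: two independent booleans, decoupled from the request-flag
     * namespace that the req_op() rework is reorganizing. */
    unsigned int feature_flush;
    unsigned int feature_fua;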
@@ -547,7 +548,7 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
ring_req->u.discard.id = id;
ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
- if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
+ if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
else
ring_req->u.discard.flag = 0;
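
req_op() is the accessor introduced by this series for the operation encoded in a request. At this point in its evolution the operation lives in the high bits of cmd_flags, so it is an enumeration compared with ==, unlike the old REQ_SECURE bit that was tested with &. A standalone model of that encoding (constants illustrative, not the kernel's exact values):

    #include <stdint.h>
    #include <stdio.h>

    #define REQ_OP_BITS  3
    #define REQ_OP_SHIFT (64 - REQ_OP_BITS)   /* op kept in the top bits */

    enum req_opf { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD,
                   REQ_OP_SECURE_ERASE, REQ_OP_FLUSH };

    static inline unsigned int req_op(uint64_t cmd_flags)
    {
            return cmd_flags >> REQ_OP_SHIFT; /* extract the operation */
    }

    int main(void)
    {
            uint64_t flags = (uint64_t)REQ_OP_SECURE_ERASE << REQ_OP_SHIFT;
            /* An op is a single value, not a bit: test with ==, never &. */
            printf("%d\n", req_op(flags) == REQ_OP_SECURE_ERASE);
            return 0;
    }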
@@ -746,7 +747,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
* The indirect operation can only be a BLKIF_OP_READ or
* BLKIF_OP_WRITE
*/
- BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
+ BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
ring_req->operation = BLKIF_OP_INDIRECT;
ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
@@ -758,7 +759,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
ring_req->u.rw.handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
- if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+ if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
/*
* Ideally we can do an unordered flush-to-disk.
* In case the backend only supports barriers, use that.
@@ -766,19 +767,14 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
* implement it the same way. (It's also a FLUSH+FUA,
* since it is guaranteed ordered WRT previous writes.)
*/
- switch (info->feature_flush &
- ((REQ_FLUSH|REQ_FUA))) {
- case REQ_FLUSH|REQ_FUA:
+ if (info->feature_flush && info->feature_fua)
ring_req->operation =
BLKIF_OP_WRITE_BARRIER;
- break;
- case REQ_FLUSH:
+ else if (info->feature_flush)
ring_req->operation =
BLKIF_OP_FLUSH_DISKCACHE;
- break;
- default:
+ else
ring_req->operation = 0;
- }
}
ring_req->u.rw.nr_segments = num_grant;
if (unlikely(require_extra_req)) {
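
The switch on feature_flush & (REQ_FLUSH|REQ_FUA) becomes a plain if/else chain over the two new flags. As a standalone model, the mapping it implements (BLKIF_* values stubbed for illustration; a barrier is used for FLUSH+FUA because it is a superset of both):

    #include <stdio.h>

    #define BLKIF_OP_WRITE_BARRIER   2   /* stub values for the model */
    #define BLKIF_OP_FLUSH_DISKCACHE 3

    unsigned int flush_op(int feature_flush, int feature_fua)
    {
            if (feature_flush && feature_fua)
                    return BLKIF_OP_WRITE_BARRIER;   /* FLUSH+FUA */
            if (feature_flush)
                    return BLKIF_OP_FLUSH_DISKCACHE; /* FLUSH only */
            return 0;                                /* neither supported */
    }

    int main(void)
    {
            printf("%u %u %u\n", flush_op(1, 1), flush_op(1, 0), flush_op(0, 0));
            return 0;
    }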
@@ -847,7 +843,8 @@ static int blkif_queue_request(struct request *req, struct blkfront_ring_info *r
if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
return 1;
- if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE)))
+ if (unlikely(req_op(req) == REQ_OP_DISCARD ||
+ req_op(req) == REQ_OP_SECURE_ERASE))
return blkif_queue_discard_req(req, rinfo);
else
return blkif_queue_rw_req(req, rinfo);
@@ -867,10 +864,10 @@ static inline bool blkif_request_flush_invalid(struct request *req,
struct blkfront_info *info)
{
return ((req->cmd_type != REQ_TYPE_FS) ||
- ((req->cmd_flags & REQ_FLUSH) &&
- !(info->feature_flush & REQ_FLUSH)) ||
+ ((req_op(req) == REQ_OP_FLUSH) &&
+ !info->feature_flush) ||
((req->cmd_flags & REQ_FUA) &&
- !(info->feature_flush & REQ_FUA)));
+ !info->feature_fua));
}
static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
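
blkif_request_flush_invalid() now checks the two capabilities separately: a FLUSH request is rejected when flush is unsupported, and a FUA-flagged write when FUA is unsupported. A standalone model of the predicate (types simplified to bools):

    #include <stdbool.h>

    /* Reject what the backend cannot honor: a flush without flush
     * support, or a FUA write without FUA support. */
    bool flush_invalid(bool is_flush, bool is_fua,
                       bool feature_flush, bool feature_fua)
    {
            return (is_flush && !feature_flush) ||
                   (is_fua && !feature_fua);
    }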
@@ -955,7 +952,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
rq->limits.discard_granularity = info->discard_granularity;
rq->limits.discard_alignment = info->discard_alignment;
if (info->feature_secdiscard)
- queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
+ queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
}
/* Hard sector size and max sectors impersonate the equiv. hardware. */
@@ -981,24 +978,22 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
return 0;
}
-static const char *flush_info(unsigned int feature_flush)
+static const char *flush_info(struct blkfront_info *info)
{
- switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) {
- case REQ_FLUSH|REQ_FUA:
+ if (info->feature_flush && info->feature_fua)
return "barrier: enabled;";
- case REQ_FLUSH:
+ else if (info->feature_flush)
return "flush diskcache: enabled;";
- default:
+ else
return "barrier or flush: disabled;";
- }
}
static void xlvbd_flush(struct blkfront_info *info)
{
- blk_queue_write_cache(info->rq, info->feature_flush & REQ_FLUSH,
- info->feature_flush & REQ_FUA);
+ blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
+ info->feature_fua ? true : false);
pr_info("blkfront: %s: %s %s %s %s %s\n",
- info->gd->disk_name, flush_info(info->feature_flush),
+ info->gd->disk_name, flush_info(info),
"persistent grants:", info->feature_persistent ?
"enabled;" : "disabled;", "indirect descriptors:",
info->max_indirect_segments ? "enabled;" : "disabled;");
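
blk_queue_write_cache() is the block-layer interface that replaces storing REQ_FLUSH/REQ_FUA bits: the driver reports "write cache present" and "FUA supported" as two booleans and the core decides when to issue flushes. Given the format string above, a backend advertising barriers, persistent grants, and indirect descriptors would log something like (device name hypothetical):

    blkfront: xvda: barrier: enabled; persistent grants: enabled; indirect descriptors: enabled;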
@@ -1139,7 +1134,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
gd->first_minor = minor;
gd->fops = &xlvbd_block_fops;
gd->private_data = info;
- gd->driverfs_dev = &(info->xbdev->dev);
set_capacity(gd, capacity);
if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
@@ -1597,7 +1591,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
info->feature_discard = 0;
info->feature_secdiscard = 0;
queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
- queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
+ queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
}
blk_mq_complete_request(req, error);
break;
@@ -1617,6 +1611,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
if (unlikely(error)) {
if (error == -EOPNOTSUPP)
error = 0;
+ info->feature_fua = 0;
info->feature_flush = 0;
xlvbd_flush(info);
}
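
On any error completing a flush-type request, the frontend now drops both capability flags (feature_fua being the new one) and calls xlvbd_flush(), which re-runs blk_queue_write_cache() with both booleans false so the block layer stops generating flushes; EOPNOTSUPP is additionally squashed so it is not reported as an I/O error. The path, abridged from the hunk above with comments ours:

    if (unlikely(error)) {
            if (error == -EOPNOTSUPP)
                    error = 0;       /* unsupported is not fatal */
            info->feature_fua = 0;   /* stop issuing FUA writes... */
            info->feature_flush = 0; /* ...and flushes */
            xlvbd_flush(info);       /* re-advertise: no cache, no FUA */
    }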
@@ -2064,7 +2059,7 @@ static int blkif_recover(struct blkfront_info *info)
bio_trim(cloned_bio, offset, size);
cloned_bio->bi_private = split_bio;
cloned_bio->bi_end_io = split_bio_end;
- submit_bio(cloned_bio->bi_rw, cloned_bio);
+ submit_bio(cloned_bio);
}
/*
* Now we have to wait for all those smaller bios to
@@ -2073,7 +2068,7 @@ static int blkif_recover(struct blkfront_info *info)
continue;
}
/* We don't need to split this bio */
- submit_bio(bio->bi_rw, bio);
+ submit_bio(bio);
}
return 0;
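
submit_bio() loses its rw argument in this series: the operation and flags now travel inside the bio (bi_rw at this point in the tree), so a cloned bio inherits them and can be resubmitted unchanged. A hedged sketch of the calling convention, assuming the bio_set_op_attrs() helper from the same rework:

    /* Old: submit_bio(bio->bi_rw, bio);  op passed alongside the bio. */
    bio_set_op_attrs(bio, REQ_OP_WRITE, 0); /* set op/flags when built */
    submit_bio(bio);                        /* op/flags read from bio  */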
@@ -2108,11 +2103,16 @@ static int blkfront_resume(struct xenbus_device *dev)
/*
* Get the bios in the request so we can re-queue them.
*/
- if (shadow[j].request->cmd_flags &
- (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+ if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
+ req_op(shadow[j].request) == REQ_OP_DISCARD ||
+ req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
+ shadow[j].request->cmd_flags & REQ_FUA) {
/*
* Flush operations don't contain bios, so
* we need to requeue the whole request
+ *
+ * XXX: but this doesn't make any sense for a
* write with the FUA flag set.
*/
list_add(&shadow[j].request->queuelist, &info->requests);
continue;
@@ -2197,10 +2197,9 @@ static void blkfront_setup_discard(struct blkfront_info *info)
info->discard_granularity = discard_granularity;
info->discard_alignment = discard_alignment;
}
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "discard-secure", "%d", &discard_secure,
- NULL);
- if (!err)
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "discard-secure", "%u", &discard_secure);
+ if (err > 0)
info->feature_secdiscard = !!discard_secure;
}
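
The xenbus_gather() to xenbus_scanf() conversions flip every success test from !err to err > 0 because the two calls follow different conventions: xenbus_gather() returns 0 on success, while xenbus_scanf() behaves like sscanf(), returning the number of fields converted or a negative errno. A runnable userspace model of the new check using plain sscanf():

    #include <stdio.h>

    int main(void)
    {
            unsigned int discard_secure;
            /* sscanf-style: the return value counts converted fields,
             * hence "err > 0" rather than "!err" after the conversion. */
            int err = sscanf("1", "%u", &discard_secure);
            if (err > 0)
                    printf("feature_secdiscard = %u\n", !!discard_secure);
            return 0;
    }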
@@ -2298,10 +2297,10 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
unsigned int indirect_segments;
info->feature_flush = 0;
+ info->feature_fua = 0;
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-barrier", "%d", &barrier,
- NULL);
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "feature-barrier", "%d", &barrier);
/*
* If there's no "feature-barrier" defined, then it means
@@ -2310,38 +2309,40 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
*
* If there are barriers, then we use flush.
*/
- if (!err && barrier)
- info->feature_flush = REQ_FLUSH | REQ_FUA;
+ if (err > 0 && barrier) {
+ info->feature_flush = 1;
+ info->feature_fua = 1;
+ }
+
/*
* And if there is "feature-flush-cache" use that above
* barriers.
*/
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-flush-cache", "%d", &flush,
- NULL);
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "feature-flush-cache", "%d", &flush);
- if (!err && flush)
- info->feature_flush = REQ_FLUSH;
+ if (err > 0 && flush) {
+ info->feature_flush = 1;
+ info->feature_fua = 0;
+ }
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-discard", "%d", &discard,
- NULL);
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "feature-discard", "%d", &discard);
- if (!err && discard)
+ if (err > 0 && discard)
blkfront_setup_discard(info);
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-persistent", "%u", &persistent,
- NULL);
- if (err)
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "feature-persistent", "%d", &persistent);
+ if (err <= 0)
info->feature_persistent = 0;
else
info->feature_persistent = persistent;
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-max-indirect-segments", "%u", &indirect_segments,
- NULL);
- if (err)
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "feature-max-indirect-segments", "%u",
+ &indirect_segments);
+ if (err <= 0)
info->max_indirect_segments = 0;
else
info->max_indirect_segments = min(indirect_segments,
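
Note the probe order in this hunk: "feature-barrier" is read first and, if set, enables both flags (a barrier behaves as FLUSH+FUA); a subsequent "feature-flush-cache" hit deliberately overrides it, keeping flush but clearing FUA. A standalone model of the negotiation result:

    #include <stdio.h>

    struct features { int flush, fua; };

    struct features negotiate(int barrier, int flush_cache)
    {
            struct features f = { 0, 0 };
            if (barrier)            /* barrier implies FLUSH+FUA */
                    f.flush = f.fua = 1;
            if (flush_cache) {      /* preferred over barriers */
                    f.flush = 1;
                    f.fua = 0;
            }
            return f;
    }

    int main(void)
    {
            struct features f = negotiate(1, 1);
            printf("flush=%d fua=%d\n", f.flush, f.fua); /* flush=1 fua=0 */
            return 0;
    }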
@@ -2441,7 +2442,7 @@ static void blkfront_connect(struct blkfront_info *info)
for (i = 0; i < info->nr_rings; i++)
kick_pending_request_queues(&info->rinfo[i]);
- add_disk(info->gd);
+ device_add_disk(&info->xbdev->dev, info->gd);
info->is_ready = 1;
}
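
device_add_disk() folds the parent-device assignment into disk registration, which is why the gd->driverfs_dev store was removed earlier in this diff. A sketch of the before/after (two-argument form as of this kernel):

    /* Before: parent stashed by hand, then the disk registered alone:
     *     gd->driverfs_dev = &info->xbdev->dev;
     *     add_disk(gd);
     *
     * After: parent passed at registration time:
     *     device_add_disk(&info->xbdev->dev, gd);
     */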