Diffstat (limited to 'drivers/md/dm-rq.c'):

 -rw-r--r--  drivers/md/dm-rq.c | 50 ++++++++++++++++++++++++++++++--------------------
 1 file changed, 30 insertions(+), 20 deletions(-)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 0b081d170087..a48130b90157 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -280,14 +280,14 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
 	if (!rq->q->mq_ops)
 		dm_old_requeue_request(rq);
 	else
-		dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);
+		dm_mq_delay_requeue_request(rq, delay_requeue ? 100/*ms*/ : 0);
 
 	rq_completed(md, rw, false);
 }
 
 static void dm_done(struct request *clone, int error, bool mapped)
 {
-	int r = error;
+	int r = DM_ENDIO_DONE;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	dm_request_endio_fn rq_end_io = NULL;
 
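Note: the requeue backoff above drops from 5000 ms to 100 ms. For context, a
delayed requeue on blk-mq is normally composed from two existing block-layer
calls; the sketch below is illustrative only, not this file's actual helper:

	#include <linux/blk-mq.h>

	/* Illustrative shape of a delayed blk-mq requeue. */
	static void example_delay_requeue(struct request *rq, unsigned long msecs)
	{
		/* park the request on the queue's requeue list, don't kick it yet */
		blk_mq_requeue_request(rq, false);
		/* re-run the requeue list after 'msecs' milliseconds */
		blk_mq_delay_kick_requeue_list(rq->q, msecs);
	}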
@@ -298,20 +298,28 @@ static void dm_done(struct request *clone, int error, bool mapped)
 		r = rq_end_io(tio->ti, clone, error, &tio->info);
 	}
 
-	if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
-		     !clone->q->limits.max_write_same_sectors))
-		disable_write_same(tio->md);
+	if (unlikely(error == -EREMOTEIO)) {
+		if (req_op(clone) == REQ_OP_WRITE_SAME &&
+		    !clone->q->limits.max_write_same_sectors)
+			disable_write_same(tio->md);
+		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+		    !clone->q->limits.max_write_zeroes_sectors)
+			disable_write_zeroes(tio->md);
+	}
 
-	if (r <= 0)
+	switch (r) {
+	case DM_ENDIO_DONE:
 		/* The target wants to complete the I/O */
-		dm_end_request(clone, r);
-	else if (r == DM_ENDIO_INCOMPLETE)
+		dm_end_request(clone, error);
+		break;
+	case DM_ENDIO_INCOMPLETE:
 		/* The target will handle the I/O */
 		return;
-	else if (r == DM_ENDIO_REQUEUE)
+	case DM_ENDIO_REQUEUE:
 		/* The target wants to requeue the I/O */
 		dm_requeue_original_request(tio, false);
-	else {
+		break;
+	default:
 		DMWARN("unimplemented target endio return value: %d", r);
 		BUG();
 	}
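Note: with r seeded with DM_ENDIO_DONE and dispatched through a switch, a
target's rq_end_io hook now returns a DM_ENDIO_* disposition instead of an
errno; the transport error stays in 'error', which dm_done() consults
separately for the WRITE SAME / WRITE ZEROES fallbacks. A hedged sketch of a
hook under this convention (the retry policy is hypothetical):

	#include <linux/device-mapper.h>
	#include <linux/blkdev.h>

	static int example_rq_end_io(struct dm_target *ti, struct request *clone,
				     int error, union map_info *info)
	{
		/* hypothetical policy: retry remote-I/O errors on another path */
		if (error == -EREMOTEIO)
			return DM_ENDIO_REQUEUE;

		/* let dm-rq complete the request, passing 'error' through */
		return DM_ENDIO_DONE;
	}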
@@ -358,7 +366,7 @@ static void dm_complete_request(struct request *rq, int error)
 	if (!rq->q->mq_ops)
 		blk_complete_request(rq);
 	else
-		blk_mq_complete_request(rq, error);
+		blk_mq_complete_request(rq);
 }
 
 /*
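Note: blk_mq_complete_request() no longer carries the error, so the errno must
travel in per-request state and be read back in the driver's ->complete handler
(dm_softirq_done here). A minimal sketch of the pattern, assuming the error is
stashed in the request's pdu (the field name is illustrative):

	#include <linux/blk-mq.h>

	static void example_complete(struct request *rq, int error)
	{
		struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

		tio->error = error;		/* stash: ->complete takes no error */
		blk_mq_complete_request(rq);	/* later runs the ->complete hook */
	}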
@@ -496,14 +504,13 @@ static int map_request(struct dm_rq_target_io *tio)
 		/* The target wants to requeue the I/O after a delay */
 		dm_requeue_original_request(tio, true);
 		break;
-	default:
-		if (r > 0) {
-			DMWARN("unimplemented target map return value: %d", r);
-			BUG();
-		}
-
+	case DM_MAPIO_KILL:
 		/* The target wants to complete the I/O */
-		dm_kill_unmapped_request(rq, r);
+		dm_kill_unmapped_request(rq, -EIO);
+		break;
+	default:
+		DMWARN("unimplemented target map return value: %d", r);
+		BUG();
 	}
 
 	return r;
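Note: DM_MAPIO_KILL lets a request-based target report an unmappable request
symbolically while dm-rq itself picks the errno (-EIO), instead of smuggling a
negative errno through the map return value. A hedged sketch of a
clone-and-map hook using it (the path-selection and clone-setup helpers are
hypothetical):

	#include <linux/device-mapper.h>
	#include <linux/blkdev.h>

	static int example_clone_and_map(struct dm_target *ti, struct request *rq,
					 union map_info *map_context,
					 struct request **clone)
	{
		/* hypothetical helper: pick a live path for this request */
		if (!example_select_path(ti))
			return DM_MAPIO_KILL;	/* dm-rq fails the I/O with -EIO */

		/* hypothetical helper: build the clone against that path */
		*clone = example_setup_clone(ti, rq);
		return DM_MAPIO_REMAPPED;
	}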
@@ -762,7 +768,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_MQ_RQ_QUEUE_OK;
 }
 
-static struct blk_mq_ops dm_mq_ops = {
+static const struct blk_mq_ops dm_mq_ops = {
 	.queue_rq = dm_mq_queue_rq,
 	.complete = dm_softirq_done,
 	.init_request = dm_mq_init_request,
@@ -810,10 +816,14 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	dm_init_md_queue(md);
 
 	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
-	blk_mq_register_dev(disk_to_dev(md->disk), q);
+	err = blk_mq_register_dev(disk_to_dev(md->disk), q);
+	if (err)
+		goto out_cleanup_queue;
 
 	return 0;
 
+out_cleanup_queue:
+	blk_cleanup_queue(q);
 out_tag_set:
 	blk_mq_free_tag_set(md->tag_set);
 out_kfree_tag_set:
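Note: the new out_cleanup_queue label extends the usual kernel unwind ladder:
each setup step gets a label, and the labels run in reverse order of
acquisition, so a goto undoes exactly the steps that had succeeded. Generic
shape of the idiom, with hypothetical step names:

	int example_setup(void)
	{
		int err;

		err = acquire_a();		/* hypothetical step 1 */
		if (err)
			return err;

		err = acquire_b();		/* hypothetical step 2 */
		if (err)
			goto out_release_a;

		err = acquire_c();		/* hypothetical step 3 */
		if (err)
			goto out_release_b;

		return 0;

	out_release_b:
		release_b();			/* undo step 2 */
	out_release_a:
		release_a();			/* undo step 1 */
		return err;
	}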