Diffstat (limited to 'drivers/md/dm-rq.c')
 drivers/md/dm-rq.c | 113 ++++++++++++++++++++++++++-------------
 1 file changed, 71 insertions(+), 42 deletions(-)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1ca7463e8bb2..dc75bea0d541 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -73,15 +73,24 @@ static void dm_old_start_queue(struct request_queue *q)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
+static void dm_mq_start_queue(struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	blk_mq_start_stopped_hw_queues(q, true);
+	blk_mq_kick_requeue_list(q);
+}
+
 void dm_start_queue(struct request_queue *q)
 {
 	if (!q->mq_ops)
 		dm_old_start_queue(q);
-	else {
-		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q);
-		blk_mq_start_stopped_hw_queues(q, true);
-		blk_mq_kick_requeue_list(q);
-	}
+	else
+		dm_mq_start_queue(q);
 }
 
 static void dm_old_stop_queue(struct request_queue *q)
@@ -89,27 +98,35 @@ static void dm_old_stop_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
+	if (!blk_queue_stopped(q))
+		blk_stop_queue(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void dm_mq_stop_queue(struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
 	if (blk_queue_stopped(q)) {
 		spin_unlock_irqrestore(q->queue_lock, flags);
 		return;
 	}
 
-	blk_stop_queue(q);
+	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	/* Avoid that requeuing could restart the queue. */
+	blk_mq_cancel_requeue_work(q);
+	blk_mq_stop_hw_queues(q);
 }
 
 void dm_stop_queue(struct request_queue *q)
 {
 	if (!q->mq_ops)
 		dm_old_stop_queue(q);
-	else {
-		spin_lock_irq(q->queue_lock);
-		queue_flag_set(QUEUE_FLAG_STOPPED, q);
-		spin_unlock_irq(q->queue_lock);
-
-		blk_mq_cancel_requeue_work(q);
-		blk_mq_stop_hw_queues(q);
-	}
+	else
+		dm_mq_stop_queue(q);
 }
 
 static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
@@ -319,21 +336,32 @@ static void dm_old_requeue_request(struct request *rq)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void dm_mq_requeue_request(struct request *rq)
+static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
 {
-	struct request_queue *q = rq->q;
 	unsigned long flags;
 
-	blk_mq_requeue_request(rq);
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (!blk_queue_stopped(q))
-		blk_mq_kick_requeue_list(q);
+		blk_mq_delay_kick_requeue_list(q, msecs);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void dm_requeue_original_request(struct mapped_device *md,
-					struct request *rq)
+void dm_mq_kick_requeue_list(struct mapped_device *md)
+{
+	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
+}
+EXPORT_SYMBOL(dm_mq_kick_requeue_list);
+
+static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
+{
+	blk_mq_requeue_request(rq);
+	__dm_mq_kick_requeue_list(rq->q, msecs);
+}
+
+static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
 {
+	struct mapped_device *md = tio->md;
+	struct request *rq = tio->orig;
 	int rw = rq_data_dir(rq);
 
 	rq_end_stats(md, rq);
@@ -342,7 +370,7 @@ static void dm_requeue_original_request(struct mapped_device *md,
 	if (!rq->q->mq_ops)
 		dm_old_requeue_request(rq);
 	else
-		dm_mq_requeue_request(rq);
+		dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);
 
 	rq_completed(md, rw, false);
 }
@@ -372,7 +400,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
 		return;
 	else if (r == DM_ENDIO_REQUEUE)
 		/* The target wants to requeue the I/O */
-		dm_requeue_original_request(tio->md, tio->orig);
+		dm_requeue_original_request(tio, false);
 	else {
 		DMWARN("unimplemented target endio return value: %d", r);
 		BUG();
@@ -553,7 +581,7 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
 	if (!md->init_tio_pdu)
 		memset(&tio->info, 0, sizeof(tio->info));
 	if (md->kworker_task)
-		init_kthread_work(&tio->work, map_tio_request);
+		kthread_init_work(&tio->work, map_tio_request);
 }
 
 static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
@@ -612,20 +640,23 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
 
 /*
  * Returns:
- * 0                : the request has been processed
- * DM_MAPIO_REQUEUE : the original request needs to be requeued
+ * DM_MAPIO_*       : the request has been processed as indicated
+ * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
  * < 0              : the request was completed due to failure
  */
-static int map_request(struct dm_rq_target_io *tio, struct request *rq,
-		       struct mapped_device *md)
+static int map_request(struct dm_rq_target_io *tio)
 {
 	int r;
 	struct dm_target *ti = tio->ti;
+	struct mapped_device *md = tio->md;
+	struct request *rq = tio->orig;
 	struct request *clone = NULL;
 
 	if (tio->clone) {
 		clone = tio->clone;
 		r = ti->type->map_rq(ti, clone, &tio->info);
+		if (r == DM_MAPIO_DELAY_REQUEUE)
+			return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
 	} else {
 		r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
 		if (r < 0) {
@@ -633,9 +664,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 			dm_kill_unmapped_request(rq, r);
 			return r;
 		}
-		if (r != DM_MAPIO_REMAPPED)
-			return r;
-		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+		if (r == DM_MAPIO_REMAPPED &&
+		    setup_clone(clone, rq, tio, GFP_ATOMIC)) {
 			/* -ENOMEM */
 			ti->type->release_clone_rq(clone);
 			return DM_MAPIO_REQUEUE;
@@ -654,7 +684,10 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 		break;
 	case DM_MAPIO_REQUEUE:
 		/* The target wants to requeue the I/O */
-		dm_requeue_original_request(md, tio->orig);
+		break;
+	case DM_MAPIO_DELAY_REQUEUE:
+		/* The target wants to requeue the I/O after a delay */
+		dm_requeue_original_request(tio, true);
 		break;
 	default:
 		if (r > 0) {
@@ -664,10 +697,9 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 
 		/* The target wants to complete the I/O */
 		dm_kill_unmapped_request(rq, r);
-		return r;
 	}
 
-	return 0;
+	return r;
 }
 
 static void dm_start_request(struct mapped_device *md, struct request *orig)
@@ -706,11 +738,9 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 static void map_tio_request(struct kthread_work *work)
 {
 	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
-	struct request *rq = tio->orig;
-	struct mapped_device *md = tio->md;
 
-	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
-		dm_requeue_original_request(md, rq);
+	if (map_request(tio) == DM_MAPIO_REQUEUE)
+		dm_requeue_original_request(tio, false);
 }
 
 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
@@ -801,7 +831,7 @@ static void dm_old_request_fn(struct request_queue *q)
 		tio = tio_from_request(rq);
 		/* Establish tio->ti before queuing work (map_tio_request) */
 		tio->ti = ti;
-		queue_kthread_work(&md->kworker, &tio->work);
+		kthread_queue_work(&md->kworker, &tio->work);
 		BUG_ON(!irqs_disabled());
 	}
 }
@@ -823,7 +853,7 @@ int dm_old_init_request_queue(struct mapped_device *md)
 	blk_queue_prep_rq(md->queue, dm_old_prep_fn);
 
 	/* Initialize the request-based DM worker thread */
-	init_kthread_worker(&md->kworker);
+	kthread_init_worker(&md->kworker);
 	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
 				       "kdmwork-%s", dm_device_name(md));
 	if (IS_ERR(md->kworker_task))
@@ -896,7 +926,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	tio->ti = ti;
 
 	/* Direct call is fine since .queue_rq allows allocations */
-	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+	if (map_request(tio) == DM_MAPIO_REQUEUE) {
 		/* Undo dm_start_request() before requeuing */
 		rq_end_stats(md, rq);
 		rq_completed(md, rq_data_dir(rq), false);
@@ -908,7 +938,6 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 static struct blk_mq_ops dm_mq_ops = {
 	.queue_rq = dm_mq_queue_rq,
-	.map_queue = blk_mq_map_queue,
 	.complete = dm_softirq_done,
 	.init_request = dm_mq_init_request,
 };
@@ -955,7 +984,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	dm_init_md_queue(md);
 
 	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
-	blk_mq_register_disk(md->disk);
+	blk_mq_register_dev(disk_to_dev(md->disk), q);
 
 	return 0;
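
The functional core of the diff is the new DM_MAPIO_DELAY_REQUEUE handling: blk-mq requests are now requeued through blk_mq_delay_kick_requeue_list() with a 5000 ms delay instead of immediately, and dm_mq_kick_requeue_list() is exported so a target can cut that delay short once it can make progress again. The following sketch illustrates how a request-based target might use these two interfaces; it is not code from this commit. The target name and the paths_available()/example_path_restored() helpers are hypothetical, invented for illustration (the in-tree user of these interfaces is dm-mpath), and dm_mq_kick_requeue_list() is declared in drivers/md/dm-rq.h, so it is only reachable from code built under drivers/md/.

    /* Hypothetical example target -- a sketch, not code from this commit. */
    #include <linux/blkdev.h>
    #include <linux/device-mapper.h>
    #include "dm-rq.h"	/* dm_mq_kick_requeue_list() */

    /* Placeholder for real path-state bookkeeping (hypothetical helper). */
    static bool paths_available(struct dm_target *ti)
    {
    	return false;
    }

    static int example_clone_and_map_rq(struct dm_target *ti, struct request *rq,
    				    union map_info *map_context,
    				    struct request **clone)
    {
    	if (!paths_available(ti))
    		/*
    		 * map_request() above turns this into
    		 * dm_requeue_original_request(tio, true): the original
    		 * request is requeued after a delay (5000 ms) rather than
    		 * immediately, so an all-paths-down device does not spin
    		 * in a requeue loop.
    		 */
    		return DM_MAPIO_DELAY_REQUEUE;

    	/* ...allocate and set up *clone against an underlying device... */
    	return DM_MAPIO_REMAPPED;
    }

    /* Called once the target can make progress again (e.g. a path returned). */
    static void example_path_restored(struct mapped_device *md)
    {
    	/* Run the requeue list now instead of waiting out the delay. */
    	dm_mq_kick_requeue_list(md);
    }

Note that on the legacy .request_fn path, map_request() deliberately collapses DM_MAPIO_DELAY_REQUEUE into an immediate DM_MAPIO_REQUEUE, since the old request queue has no delayed requeue-list kick.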