Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c       | 16
-rw-r--r--  block/blk-softirq.c  | 27
-rw-r--r--  block/blk-throttle.c |  6
3 files changed, 28 insertions(+), 21 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e9ebe9864cc4..dc5f47f60931 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -266,17 +266,29 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 	if (ret)
 		return ERR_PTR(ret);
 
+	/*
+	 * Check if the hardware context is actually mapped to anything.
+	 * If not tell the caller that it should skip this queue.
+	 */
 	hctx = q->queue_hw_ctx[hctx_idx];
+	if (!blk_mq_hw_queue_mapped(hctx)) {
+		ret = -EXDEV;
+		goto out_queue_exit;
+	}
 	ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
 
 	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
 	if (!rq) {
-		blk_queue_exit(q);
-		return ERR_PTR(-EWOULDBLOCK);
+		ret = -EWOULDBLOCK;
+		goto out_queue_exit;
 	}
 
 	return rq;
+
+out_queue_exit:
+	blk_queue_exit(q);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 53b1737e978d..96631e6a22b9 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -78,30 +78,21 @@ static int raise_blk_irq(int cpu, struct request *rq)
 }
 #endif
 
-static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
-			  void *hcpu)
+static int blk_softirq_cpu_dead(unsigned int cpu)
 {
 	/*
 	 * If a CPU goes away, splice its entries to the current CPU
 	 * and trigger a run of the softirq
 	 */
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		int cpu = (unsigned long) hcpu;
-
-		local_irq_disable();
-		list_splice_init(&per_cpu(blk_cpu_done, cpu),
-				 this_cpu_ptr(&blk_cpu_done));
-		raise_softirq_irqoff(BLOCK_SOFTIRQ);
-		local_irq_enable();
-	}
+	local_irq_disable();
+	list_splice_init(&per_cpu(blk_cpu_done, cpu),
+			 this_cpu_ptr(&blk_cpu_done));
+	raise_softirq_irqoff(BLOCK_SOFTIRQ);
+	local_irq_enable();
 
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block blk_cpu_notifier = {
-	.notifier_call	= blk_cpu_notify,
-};
-
 void __blk_complete_request(struct request *req)
 {
 	int ccpu, cpu;
@@ -180,7 +171,9 @@ static __init int blk_softirq_init(void)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
-	register_hotcpu_notifier(&blk_cpu_notifier);
+	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
+				  "block/softirq:dead", NULL,
+				  blk_softirq_cpu_dead);
 	return 0;
 }
 subsys_initcall(blk_softirq_init);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index f1aba26f4719..a3ea8260c94c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -780,9 +780,11 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 	/*
 	 * If previous slice expired, start a new one otherwise renew/extend
 	 * existing slice to make sure it is at least throtl_slice interval
-	 * long since now.
+	 * long since now. New slice is started only for empty throttle group.
+	 * If there is queued bio, that means there should be an active
+	 * slice and it should be extended instead.
 	 */
-	if (throtl_slice_used(tg, rw))
+	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
 		throtl_start_new_slice(tg, rw);
 	else {
 		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
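
Two notes on the changes above, each with a hedged sketch; neither block is part of this commit.

First, blk_mq_alloc_request_hctx() can now fail with -EXDEV when the requested hardware queue has no CPUs mapped to it, and the new comment says the caller "should skip this queue". A minimal sketch of such a caller, assuming the signature shown in the hunk above; alloc_on_any_hctx() is an invented name, not kernel code:

/* Hypothetical caller: walk the hardware queues, skipping any that the
 * new check rejects with -EXDEV because they have no mapped CPUs. */
static struct request *alloc_on_any_hctx(struct request_queue *q, int rw,
					 unsigned int flags,
					 unsigned int nr_hw_queues)
{
	struct request *rq;
	unsigned int i;

	for (i = 0; i < nr_hw_queues; i++) {
		rq = blk_mq_alloc_request_hctx(q, rw, flags, i);
		if (!IS_ERR(rq))
			return rq;		/* success */
		if (PTR_ERR(rq) != -EXDEV)
			return rq;		/* hard failure, give up */
		/* -EXDEV: queue i is unmapped, try the next one */
	}
	return ERR_PTR(-EXDEV);
}

Second, the blk-softirq change is a straight conversion from the old CPU notifier API to the hotplug state machine: the CPU_DEAD/CPU_DEAD_FROZEN cases become a plain teardown callback registered once at init. A self-contained sketch of the same pattern, assuming a dynamically allocated state (CPUHP_BP_PREPARE_DYN) rather than the fixed CPUHP_BLOCK_SOFTIRQ_DEAD slot used above; example_list and example_dead() are invented names:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/list.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct list_head, example_list);

/* Teardown ("dead") callback: runs on a surviving CPU after @cpu has
 * gone offline, so splicing its per-CPU list over here is safe. */
static int example_dead(unsigned int cpu)
{
	local_irq_disable();
	list_splice_init(&per_cpu(example_list, cpu),
			 this_cpu_ptr(&example_list));
	local_irq_enable();
	return 0;
}

static int __init example_init(void)
{
	int cpu, ret;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(example_list, cpu));

	/* _nocalls: register the state without invoking callbacks for
	 * CPUs that are already online; NULL startup, teardown only. */
	ret = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
					"example:dead", NULL,
					example_dead);
	return ret < 0 ? ret : 0;	/* DYN states return the slot number */
}
subsys_initcall(example_init);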