Diffstat (limited to 'block/blk-mq.c')
-rw-r--r-- | block/blk-mq.c | 152
1 file changed, 69 insertions, 83 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9c8dc70020bc..d3494a796ba8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -658,7 +658,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	 * allocator for this for the rare use case of a command tied to
 	 * a specific queue.
 	 */
-	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
+	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
+	    WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
 		return ERR_PTR(-EINVAL);

 	if (hctx_idx >= q->nr_hw_queues)
@@ -1825,12 +1826,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
				 struct request *rq)
 {
-	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
+	struct sbitmap_queue *sbq;
 	struct wait_queue_head *wq;
 	wait_queue_entry_t *wait;
 	bool ret;

-	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
+	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
+	    !(blk_mq_is_shared_tags(hctx->flags))) {
 		blk_mq_sched_mark_restart_hctx(hctx);

 		/*
@@ -1848,6 +1850,10 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
 	if (!list_empty_careful(&wait->entry))
 		return false;

+	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
+		sbq = &hctx->tags->breserved_tags;
+	else
+		sbq = &hctx->tags->bitmap_tags;
 	wq = &bt_wait_ptr(sbq, hctx)->wait;

 	spin_lock_irq(&wq->lock);
@@ -1917,16 +1923,6 @@ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
 static void blk_mq_handle_dev_resource(struct request *rq,
				       struct list_head *list)
 {
-	struct request *next =
-		list_first_entry_or_null(list, struct request, queuelist);
-
-	/*
-	 * If an I/O scheduler has been configured and we got a driver tag for
-	 * the next request already, free it.
-	 */
-	if (next)
-		blk_mq_put_driver_tag(next);
-
 	list_add(&rq->queuelist, list);
 	__blk_mq_requeue_request(rq);
 }
@@ -2002,6 +1998,23 @@ static void blk_mq_release_budgets(struct request_queue *q,
 }

 /*
+ * blk_mq_commit_rqs will notify driver using bd->last that there is no
+ * more requests. (See comment in struct blk_mq_ops for commit_rqs for
+ * details)
+ * Attention, we should explicitly call this in unusual cases:
+ *  1) did not queue everything initially scheduled to queue
+ *  2) the last attempt to queue a request failed
+ */
+static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
+			      bool from_schedule)
+{
+	if (hctx->queue->mq_ops->commit_rqs && queued) {
+		trace_block_unplug(hctx->queue, queued, !from_schedule);
+		hctx->queue->mq_ops->commit_rqs(hctx);
+	}
+}
+
+/*
  * Returns true if we did some work AND can potentially do more.
  */
 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
@@ -2009,8 +2022,8 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
 {
 	enum prep_dispatch prep;
 	struct request_queue *q = hctx->queue;
-	struct request *rq, *nxt;
-	int errors, queued;
+	struct request *rq;
+	int queued;
 	blk_status_t ret = BLK_STS_OK;
 	LIST_HEAD(zone_list);
 	bool needs_resource = false;
@@ -2021,7 +2034,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
 	/*
 	 * Now process all the entries, sending them to the driver.
 	 */
-	errors = queued = 0;
+	queued = 0;
 	do {
 		struct blk_mq_queue_data bd;

@@ -2035,17 +2048,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
 		list_del_init(&rq->queuelist);

 		bd.rq = rq;
-
-		/*
-		 * Flag last if we have no more requests, or if we have more
-		 * but can't assign a driver tag to it.
-		 */
-		if (list_empty(list))
-			bd.last = true;
-		else {
-			nxt = list_first_entry(list, struct request, queuelist);
-			bd.last = !blk_mq_get_driver_tag(nxt);
-		}
+		bd.last = list_empty(list);

 		/*
 		 * once the request is queued to lld, no need to cover the
@@ -2074,7 +2077,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
 			needs_resource = true;
 			break;
 		default:
-			errors++;
 			blk_mq_end_request(rq, ret);
 		}
 	} while (!list_empty(list));
@@ -2085,9 +2087,9 @@ out:
 	/* If we didn't flush the entire list, we could have told the driver
 	 * there was more coming, but that turned out to be a lie.
 	 */
-	if ((!list_empty(list) || errors || needs_resource ||
-	     ret == BLK_STS_DEV_RESOURCE) && q->mq_ops->commit_rqs && queued)
-		q->mq_ops->commit_rqs(hctx);
+	if (!list_empty(list) || ret != BLK_STS_OK)
+		blk_mq_commit_rqs(hctx, queued, false);
+
 	/*
 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
 	 * that is where we will continue on next queue run.
@@ -2096,7 +2098,8 @@ out:
 		bool needs_restart;
 		/* For non-shared tags, the RESTART check will suffice */
 		bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
-			(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
+			((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) ||
+			blk_mq_is_shared_tags(hctx->flags));

 		if (nr_budgets)
 			blk_mq_release_budgets(q, list);
@@ -2151,10 +2154,10 @@ out:

 		blk_mq_update_dispatch_busy(hctx, true);
 		return false;
-	} else
-		blk_mq_update_dispatch_busy(hctx, false);
+	}

-	return (queued + errors) != 0;
+	blk_mq_update_dispatch_busy(hctx, false);
+	return true;
 }

 /**
@@ -2548,16 +2551,6 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	spin_unlock(&ctx->lock);
 }

-static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
-			      bool from_schedule)
-{
-	if (hctx->queue->mq_ops->commit_rqs) {
-		trace_block_unplug(hctx->queue, *queued, !from_schedule);
-		hctx->queue->mq_ops->commit_rqs(hctx);
-	}
-	*queued = 0;
-}
-
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
 		unsigned int nr_segs)
 {
@@ -2681,20 +2674,21 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 	return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
 }

-static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
+static void blk_mq_plug_issue_direct(struct blk_plug *plug)
 {
 	struct blk_mq_hw_ctx *hctx = NULL;
 	struct request *rq;
 	int queued = 0;
-	int errors = 0;
+	blk_status_t ret = BLK_STS_OK;

 	while ((rq = rq_list_pop(&plug->mq_list))) {
 		bool last = rq_list_empty(plug->mq_list);
-		blk_status_t ret;

 		if (hctx != rq->mq_hctx) {
-			if (hctx)
-				blk_mq_commit_rqs(hctx, &queued, from_schedule);
+			if (hctx) {
+				blk_mq_commit_rqs(hctx, queued, false);
+				queued = 0;
+			}
 			hctx = rq->mq_hctx;
 		}

@@ -2706,21 +2700,16 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
 		case BLK_STS_RESOURCE:
 		case BLK_STS_DEV_RESOURCE:
 			blk_mq_request_bypass_insert(rq, false, true);
-			blk_mq_commit_rqs(hctx, &queued, from_schedule);
-			return;
+			goto out;
 		default:
 			blk_mq_end_request(rq, ret);
-			errors++;
 			break;
 		}
 	}

-	/*
-	 * If we didn't flush the entire list, we could have told the driver
-	 * there was more coming, but that turned out to be a lie.
-	 */
-	if (errors)
-		blk_mq_commit_rqs(hctx, &queued, from_schedule);
+out:
+	if (ret != BLK_STS_OK)
+		blk_mq_commit_rqs(hctx, queued, false);
 }

 static void __blk_mq_flush_plug_list(struct request_queue *q,
@@ -2791,7 +2780,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		}

 		blk_mq_run_dispatch_ops(q,
-				blk_mq_plug_issue_direct(plug, false));
+				blk_mq_plug_issue_direct(plug));
 		if (rq_list_empty(plug->mq_list))
 			return;
 	}
@@ -2805,36 +2794,32 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list)
 {
 	int queued = 0;
-	int errors = 0;
+	blk_status_t ret = BLK_STS_OK;

 	while (!list_empty(list)) {
-		blk_status_t ret;
 		struct request *rq = list_first_entry(list, struct request,
 				queuelist);

 		list_del_init(&rq->queuelist);
 		ret = blk_mq_request_issue_directly(rq, list_empty(list));
-		if (ret != BLK_STS_OK) {
-			errors++;
-			if (ret == BLK_STS_RESOURCE ||
-					ret == BLK_STS_DEV_RESOURCE) {
-				blk_mq_request_bypass_insert(rq, false,
-							list_empty(list));
-				break;
-			}
-			blk_mq_end_request(rq, ret);
-		} else
+		switch (ret) {
+		case BLK_STS_OK:
 			queued++;
+			break;
+		case BLK_STS_RESOURCE:
+		case BLK_STS_DEV_RESOURCE:
+			blk_mq_request_bypass_insert(rq, false,
+						     list_empty(list));
+			goto out;
+		default:
+			blk_mq_end_request(rq, ret);
+			break;
+		}
 	}

-	/*
-	 * If we didn't flush the entire list, we could have told
-	 * the driver there was more coming, but that turned out to
-	 * be a lie.
-	 */
-	if ((!list_empty(list) || errors) &&
-		hctx->queue->mq_ops->commit_rqs && queued)
-		hctx->queue->mq_ops->commit_rqs(hctx);
+out:
+	if (ret != BLK_STS_OK)
+		blk_mq_commit_rqs(hctx, queued, false);
 }

 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
@@ -2894,15 +2879,16 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,

 	if (!plug)
 		return NULL;
-	rq = rq_list_peek(&plug->cached_rq);
-	if (!rq || rq->q != q)
-		return NULL;

 	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
 		*bio = NULL;
 		return NULL;
 	}

+	rq = rq_list_peek(&plug->cached_rq);
+	if (!rq || rq->q != q)
+		return NULL;
+
 	type = blk_mq_get_hctx_type((*bio)->bi_opf);
 	hctx_type = rq->mq_hctx->type;
 	if (type != hctx_type &&
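
For readers less familiar with the dispatch pattern this patch converges on, the sketch below models it in plain userspace C: bd.last is derived purely from whether the local list has run dry, successfully queued requests are counted, and a single trailing commit notification is issued only when dispatch stopped before the list drained. All toy_* names are hypothetical stand-ins for illustration, not kernel APIs.

/*
 * Standalone userspace model (not kernel code) of the reworked dispatch flow.
 */
#include <stdbool.h>
#include <stdio.h>

enum toy_status { TOY_OK, TOY_RESOURCE, TOY_ERROR };

struct toy_queue_data {
	int rq_id;
	bool last;	/* plays the role of struct blk_mq_queue_data::last */
};

/* Pretend driver: rejects every third request to model BLK_STS_RESOURCE. */
static enum toy_status toy_queue_rq(const struct toy_queue_data *bd)
{
	if (bd->rq_id % 3 == 2)
		return TOY_RESOURCE;
	printf("queued rq %d (last=%d)\n", bd->rq_id, bd->last);
	return TOY_OK;
}

/* Mirrors the shape of the new blk_mq_commit_rqs(): only notify if queued. */
static void toy_commit_rqs(int queued)
{
	if (queued)
		printf("commit: %d request(s) queued before stopping\n", queued);
}

int main(void)
{
	int rqs[] = { 0, 1, 2, 3 };		/* four pending requests */
	int n = 4, i = 0, queued = 0;
	enum toy_status ret = TOY_OK;

	while (i < n) {
		struct toy_queue_data bd;

		bd.rq_id = rqs[i++];
		bd.last = (i == n);		/* bd.last = list_empty(list) */

		ret = toy_queue_rq(&bd);
		if (ret != TOY_OK)
			break;			/* real code requeues or ends the request */
		queued++;
	}

	/*
	 * The list was not drained (or the last attempt failed), so the driver
	 * may have been told more was coming; flush what it already accepted.
	 */
	if (i < n || ret != TOY_OK)
		toy_commit_rqs(queued);
	return 0;
}

Folding the notification into one helper is what lets the patch drop the per-call-site errors counters and the speculative driver-tag peek at the next request: the driver only needs an accurate bd->last, plus one trailing commit_rqs call when that promise turns out to be wrong.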