Diffstat (limited to 'block')
-rw-r--r--   block/blk-core.c        |  21
-rw-r--r--   block/blk-mq-tag.c      |  14
-rw-r--r--   block/blk-mq-tag.h      |   1
-rw-r--r--   block/blk-mq.c          | 104
-rw-r--r--   block/blk-mq.h          |   3
-rw-r--r--   block/blk-sysfs.c       |   2
-rw-r--r--   block/blk-timeout.c     |   3
-rw-r--r--   block/partitions/efi.c  |   2
8 files changed, 131 insertions, 19 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 30f6153a40c2..3ad405571dcc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
+void blk_set_queue_dying(struct request_queue *q)
+{
+	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+
+	if (q->mq_ops)
+		blk_mq_wake_waiters(q);
+	else {
+		struct request_list *rl;
+
+		blk_queue_for_each_rl(rl, q) {
+			if (rl->rq_pool) {
+				wake_up(&rl->wait[BLK_RW_SYNC]);
+				wake_up(&rl->wait[BLK_RW_ASYNC]);
+			}
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
 	/* mark @q DYING, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
-	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+	blk_set_queue_dying(q);
 	spin_lock_irq(lock);
 
 	/*
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 32e8dbb9ad1c..60c9d4a93fe4 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 }
 
 /*
- * Wakeup all potentially sleeping on normal (non-reserved) tags
+ * Wakeup all potentially sleeping on tags
  */
-static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
+void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 {
 	struct blk_mq_bitmap_tags *bt;
 	int i, wake_index;
@@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
 
 		wake_index = bt_index_inc(wake_index);
 	}
+
+	if (include_reserve) {
+		bt = &tags->breserved_tags;
+		if (waitqueue_active(&bt->bs[0].wait))
+			wake_up(&bt->bs[0].wait);
+	}
 }
 
 /*
@@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 
 	atomic_dec(&tags->active_queues);
 
-	blk_mq_tag_wakeup_all(tags);
+	blk_mq_tag_wakeup_all(tags, false);
 }
 
 /*
@@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
 	 * static and should never need resizing.
 	 */
 	bt_update_count(&tags->bitmap_tags, tdepth);
-	blk_mq_tag_wakeup_all(tags);
+	blk_mq_tag_wakeup_all(tags, false);
 	return 0;
 }
 
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 6206ed17ef76..a6fa0fc9d41a 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
+extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 
 enum {
 	BLK_MQ_TAG_CACHE_MIN	= 1,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index da1ab5641227..2390c5541e71 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
 	wake_up_all(&q->mq_freeze_wq);
 }
 
-static void blk_mq_freeze_queue_start(struct request_queue *q)
+void blk_mq_freeze_queue_start(struct request_queue *q)
 {
 	bool freeze;
 
@@ -120,6 +120,7 @@ static void blk_mq_freeze_queue_start(struct request_queue *q)
 		blk_mq_run_queues(q, false);
 	}
 }
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
 static void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
@@ -136,7 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q)
 	blk_mq_freeze_queue_wait(q);
 }
 
-static void blk_mq_unfreeze_queue(struct request_queue *q)
+void blk_mq_unfreeze_queue(struct request_queue *q)
 {
 	bool wake;
 
@@ -149,6 +150,24 @@ static void blk_mq_unfreeze_queue(struct request_queue *q)
 		wake_up_all(&q->mq_freeze_wq);
 	}
 }
+EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+
+void blk_mq_wake_waiters(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+
+	queue_for_each_hw_ctx(q, hctx, i)
+		if (blk_mq_hw_queue_mapped(hctx))
+			blk_mq_tag_wakeup_all(hctx->tags, true);
+
+	/*
+	 * If we are called because the queue has now been marked as
+	 * dying, we need to ensure that processes currently waiting on
+	 * the queue are notified as well.
+	 */
+	wake_up_all(&q->mq_freeze_wq);
+}
 
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 {
@@ -258,8 +277,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 		ctx = alloc_data.ctx;
 	}
 	blk_mq_put_ctx(ctx);
-	if (!rq)
+	if (!rq) {
+		blk_mq_queue_exit(q);
 		return ERR_PTR(-EWOULDBLOCK);
+	}
 	return rq;
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
@@ -383,6 +404,12 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
+int blk_mq_request_started(struct request *rq)
+{
+	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+EXPORT_SYMBOL_GPL(blk_mq_request_started);
+
 void blk_mq_start_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
@@ -500,12 +527,38 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 }
 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
+void blk_mq_cancel_requeue_work(struct request_queue *q)
+{
+	cancel_work_sync(&q->requeue_work);
+}
+EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
+
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
 	kblockd_schedule_work(&q->requeue_work);
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
+void blk_mq_abort_requeue_list(struct request_queue *q)
+{
+	unsigned long flags;
+	LIST_HEAD(rq_list);
+
+	spin_lock_irqsave(&q->requeue_lock, flags);
+	list_splice_init(&q->requeue_list, &rq_list);
+	spin_unlock_irqrestore(&q->requeue_lock, flags);
+
+	while (!list_empty(&rq_list)) {
+		struct request *rq;
+
+		rq = list_first_entry(&rq_list, struct request, queuelist);
+		list_del_init(&rq->queuelist);
+		rq->errors = -EIO;
+		blk_mq_end_request(rq, rq->errors);
+	}
+}
+EXPORT_SYMBOL(blk_mq_abort_requeue_list);
+
 static inline bool is_flush_request(struct request *rq,
 				    struct blk_flush_queue *fq, unsigned int tag)
 {
@@ -566,13 +619,24 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved)
 		break;
 	}
 }
-	
+
 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 		struct request *rq, void *priv, bool reserved)
 {
 	struct blk_mq_timeout_data *data = priv;
 
-	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+		/*
+		 * If a request wasn't started before the queue was
+		 * marked dying, kill it here or it'll go unnoticed.
+		 */
+		if (unlikely(blk_queue_dying(rq->q))) {
+			rq->errors = -EIO;
+			blk_mq_complete_request(rq);
+		}
+		return;
+	}
+	if (rq->cmd_flags & REQ_NO_TIMEOUT)
 		return;
 
 	if (time_after_eq(jiffies, rq->deadline)) {
@@ -1577,10 +1641,8 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 
-	queue_for_each_hw_ctx(q, hctx, i) {
+	queue_for_each_hw_ctx(q, hctx, i)
 		free_cpumask_var(hctx->cpumask);
-		kfree(hctx);
-	}
 }
 
 static int blk_mq_init_hctx(struct request_queue *q,
@@ -1601,7 +1663,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	hctx->queue = q;
 	hctx->queue_num = hctx_idx;
 	hctx->flags = set->flags;
-	hctx->cmd_size = set->cmd_size;
 
 	blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
 					blk_mq_hctx_notify, hctx);
@@ -1806,6 +1867,27 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 	mutex_unlock(&set->tag_list_lock);
 }
 
+/*
+ * It is the actual release handler for mq, but we do it from
+ * request queue's release handler for avoiding use-after-free
+ * and headache because q->mq_kobj shouldn't have been introduced,
+ * but we can't group ctx/kctx kobj without it.
+ */
+void blk_mq_release(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+
+	/* hctx kobj stays in hctx */
+	queue_for_each_hw_ctx(q, hctx, i)
+		kfree(hctx);
+
+	kfree(q->queue_hw_ctx);
+
+	/* ctx kobj stays in queue_ctx */
+	free_percpu(q->queue_ctx);
+}
+
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
 	struct blk_mq_hw_ctx **hctxs;
@@ -1939,12 +2021,8 @@ void blk_mq_free_queue(struct request_queue *q)
 
 	percpu_ref_exit(&q->mq_usage_counter);
 
-	free_percpu(q->queue_ctx);
-	kfree(q->queue_hw_ctx);
 	kfree(q->mq_map);
 
-	q->queue_ctx = NULL;
-	q->queue_hw_ctx = NULL;
 	q->mq_map = NULL;
 
 	mutex_lock(&all_q_mutex);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 206230e64f79..6a48c4c0d8a2 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,
 		struct request *orig_rq);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+void blk_mq_wake_waiters(struct request_queue *q);
 
 /*
  * CPU hotplug helpers
@@ -61,6 +62,8 @@ extern void blk_mq_sysfs_unregister(struct request_queue *q);
 
 extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
 
+void blk_mq_release(struct request_queue *q);
+
 /*
  * Basic implementation of sparser bitmap, allowing the user to spread
  * the bits over more cachelines.
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 935ea2aa0730..faaf36ade7eb 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -517,6 +517,8 @@ static void blk_release_queue(struct kobject *kobj)
 
 	if (!q->mq_ops)
 		blk_free_flush_queue(q->fq);
+	else
+		blk_mq_release(q);
 
 	blk_trace_shutdown(q);
 
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 56c025894cdf..246dfb16c3d9 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -190,6 +190,9 @@ void blk_add_timer(struct request *req)
 	struct request_queue *q = req->q;
 	unsigned long expiry;
 
+	if (req->cmd_flags & REQ_NO_TIMEOUT)
+		return;
+
 	/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
 	if (!q->mq_ops && !q->rq_timed_out_fn)
 		return;
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 56d08fd75b1a..26cb624ace05 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -715,7 +715,7 @@ int efi_partition(struct parsed_partitions *state)
 			state->parts[i + 1].flags = ADDPART_FLAG_RAID;
 
 		info = &state->parts[i + 1].info;
-		efi_guid_unparse(&ptes[i].unique_partition_guid, info->uuid);
+		efi_guid_to_str(&ptes[i].unique_partition_guid, info->uuid);
 
 		/* Naively convert UTF16-LE to 7 bits. */
 		label_max = min(ARRAY_SIZE(info->volname) - 1,