Diffstat (limited to 'block')
 block/blk-core.c  |  2 ++
 block/blk-mq.c    | 60 ++++++++++++++++++++++++++++----------------
 block/blk-sysfs.c |  2 --
 block/bounce.c    |  2 +-
 block/elevator.c  |  6 +-----
 5 files changed, 40 insertions(+), 32 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index fd154b94447a..7871603f0a29 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -552,6 +552,8 @@ void blk_cleanup_queue(struct request_queue *q)
 		q->queue_lock = &q->__queue_lock;
 	spin_unlock_irq(lock);
 
+	bdi_destroy(&q->backing_dev_info);
+
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }
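
The hunk above, paired with the blk-sysfs.c hunk below that removes the call from blk_release_queue(), makes bdi_destroy() run synchronously in blk_cleanup_queue(), before the final queue reference is dropped, instead of in the release callback that fires whenever the last reference happens to go away. A minimal userspace sketch of that teardown-ordering rule, with purely illustrative names (struct queue, bdi_stop() and so on are not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct bdi { int active; };

struct queue {
	int refcount;
	struct bdi bdi;
};

static void bdi_stop(struct bdi *b)
{
	b->active = 0;			/* stand-in for stopping writeback */
	printf("bdi stopped\n");
}

static void queue_put(struct queue *q)
{
	if (--q->refcount == 0) {	/* release path: runs at an unpredictable time */
		printf("queue freed\n");
		free(q);
	}
}

static void queue_cleanup(struct queue *q)
{
	/* Stop the embedded sub-object *before* dropping our reference,
	 * so it is guaranteed dead once cleanup returns, regardless of
	 * who still holds queue references. */
	bdi_stop(&q->bdi);
	queue_put(q);
}

int main(void)
{
	struct queue *q = calloc(1, sizeof(*q));

	if (!q)
		return 1;
	q->refcount = 1;
	q->bdi.active = 1;
	queue_cleanup(q);
	return 0;
}

Tearing the sub-object down in cleanup rather than in release means its lifetime ends at a well-defined point in the caller's control flow, which is the property this change appears to be after.
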
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ade8a2d1b0aa..e68b71b85a7e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -677,8 +677,11 @@ static void blk_mq_rq_timer(unsigned long priv)
 		data.next = blk_rq_timeout(round_jiffies_up(data.next));
 		mod_timer(&q->timeout, data.next);
 	} else {
-		queue_for_each_hw_ctx(q, hctx, i)
-			blk_mq_tag_idle(hctx);
+		queue_for_each_hw_ctx(q, hctx, i) {
+			/* the hctx may be unmapped, so check it here */
+			if (blk_mq_hw_queue_mapped(hctx))
+				blk_mq_tag_idle(hctx);
+		}
 	}
 }
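
For reference, a compact sketch of the guard added above: when walking every hardware context, skip the ones not mapped to any CPU before touching their per-queue state. The types and helpers here are hypothetical, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct hw_ctx {
	bool mapped;		/* stand-in for blk_mq_hw_queue_mapped() */
	int idle_count;
};

static void tag_idle(struct hw_ctx *h)
{
	h->idle_count++;	/* would touch tag state in the real code */
}

int main(void)
{
	struct hw_ctx ctxs[4] = { { true }, { false }, { true }, { false } };

	for (int i = 0; i < 4; i++) {
		/* the hctx may be unmapped, so check it here */
		if (!ctxs[i].mapped)
			continue;
		tag_idle(&ctxs[i]);
	}
	printf("idled mapped queues only\n");
	return 0;
}
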
@@ -855,6 +858,16 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 		spin_lock(&hctx->lock);
 		list_splice(&rq_list, &hctx->dispatch);
 		spin_unlock(&hctx->lock);
+		/*
+		 * The queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY,
+		 * but it's possible the queue was stopped and restarted again
+		 * before we get here. A queue restart dispatches requests, and
+		 * since the requests in rq_list haven't been added to
+		 * hctx->dispatch yet, they might get lost.
+		 *
+		 * blk_mq_run_hw_queue() already checks the STOPPED bit.
+		 */
+		blk_mq_run_hw_queue(hctx, true);
 	}
 }
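
The comment added above describes a lost-work race: the leftover requests are spliced back onto the dispatch list after a concurrent queue restart may already have scanned it, so the queue has to be kicked once more, unconditionally. A small pthread analogue of that splice-then-rekick pattern; the pthread calls are real API, everything else is an illustrative stand-in:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static int pending;		/* stand-in for entries on hctx->dispatch */
static int stop;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		while (pending > 0) {
			pending--;	/* "dispatch" one request */
			printf("dispatched, %d left\n", pending);
		}
		if (stop)
			break;
		pthread_cond_wait(&kick, &lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void splice_and_rerun(int n)
{
	pthread_mutex_lock(&lock);
	pending += n;			/* list_splice(&rq_list, ...) */
	pthread_mutex_unlock(&lock);
	/* Kick unconditionally: a worker pass that finished between our
	 * unlock and this signal would otherwise never see the new
	 * entries; this is the analogue of blk_mq_run_hw_queue(hctx, true). */
	pthread_cond_signal(&kick);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	splice_and_rerun(3);

	pthread_mutex_lock(&lock);
	stop = 1;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&kick);

	pthread_join(t, NULL);
	return 0;
}
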
@@ -1571,22 +1584,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 	return NOTIFY_OK;
 }
 
-static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
-{
-	struct request_queue *q = hctx->queue;
-	struct blk_mq_tag_set *set = q->tag_set;
-
-	if (set->tags[hctx->queue_num])
-		return NOTIFY_OK;
-
-	set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
-	if (!set->tags[hctx->queue_num])
-		return NOTIFY_STOP;
-
-	hctx->tags = set->tags[hctx->queue_num];
-	return NOTIFY_OK;
-}
-
 static int blk_mq_hctx_notify(void *data, unsigned long action,
 			      unsigned int cpu)
 {
@@ -1594,8 +1591,11 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
 
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
 		return blk_mq_hctx_cpu_offline(hctx, cpu);
-	else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-		return blk_mq_hctx_cpu_online(hctx, cpu);
+
+	/*
+	 * In the CPU-online case, tags may be reallocated in
+	 * blk_mq_map_swqueue() after the mapping is updated.
+	 */
 
 	return NOTIFY_OK;
 }
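
After this change the notifier only has real work on CPU_DEAD; the online case deliberately falls through, since tag allocation happens centrally once the new mapping is known. A hedged sketch of that control flow, with made-up constants and helpers:

#include <stdio.h>

enum action { CPU_DEAD, CPU_ONLINE };
enum { NOTIFY_OK = 1 };

static int offline_cleanup(int cpu)
{
	printf("migrated pending work off cpu %d\n", cpu);
	return NOTIFY_OK;
}

static int hctx_notify(enum action a, int cpu)
{
	if (a == CPU_DEAD)
		return offline_cleanup(cpu);
	/* CPU_ONLINE: nothing to do here; the remap step allocates
	 * tags after the cpu-to-queue mapping has been updated. */
	return NOTIFY_OK;
}

int main(void)
{
	hctx_notify(CPU_ONLINE, 2);
	hctx_notify(CPU_DEAD, 2);
	return 0;
}
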
@@ -1775,6 +1775,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	unsigned int i;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
+	struct blk_mq_tag_set *set = q->tag_set;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		cpumask_clear(hctx->cpumask);
@@ -1803,16 +1804,20 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		 * disable it and free the request entries.
 		 */
 		if (!hctx->nr_ctx) {
-			struct blk_mq_tag_set *set = q->tag_set;
-
 			if (set->tags[i]) {
 				blk_mq_free_rq_map(set, set->tags[i], i);
 				set->tags[i] = NULL;
-				hctx->tags = NULL;
 			}
+			hctx->tags = NULL;
 			continue;
 		}
 
+		/* an unmapped hw queue can be remapped after the CPU topology changes */
+		if (!set->tags[i])
+			set->tags[i] = blk_mq_init_rq_map(set, i);
+		hctx->tags = set->tags[i];
+		WARN_ON(!hctx->tags);
+
 		/*
 		 * Set the map size to the number of mapped software queues.
 		 * This is more accurate and more efficient than looping
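
The added lines give blk_mq_map_swqueue() an allocate-on-demand path: a hardware queue that lost all of its CPUs earlier (and had its tags freed by the branch above) gets a fresh tag map when a later topology change maps CPUs onto it again. The same idea in a standalone sketch, with illustrative arrays in place of the kernel structures:

#include <stdio.h>
#include <stdlib.h>

#define NR_HW 4
#define NR_TAGS 64

static int *tags[NR_HW];	/* stand-in for set->tags[] */

static void map_queues(const int nr_ctx[NR_HW])
{
	for (int i = 0; i < NR_HW; i++) {
		if (!nr_ctx[i]) {
			/* no CPUs map here: disable and free the tags */
			free(tags[i]);
			tags[i] = NULL;
			continue;
		}
		/* unmapped hw queue remapped later: allocate on demand */
		if (!tags[i])
			tags[i] = malloc(NR_TAGS * sizeof(int));
		if (!tags[i])
			fprintf(stderr, "tag allocation failed for hw %d\n", i);
	}
}

int main(void)
{
	int first[NR_HW]  = { 4, 0, 2, 0 };
	int second[NR_HW] = { 2, 2, 0, 2 };	/* topology changed */

	map_queues(first);
	map_queues(second);	/* hw 1 and 3 get tags now, hw 2 loses them */

	for (int i = 0; i < NR_HW; i++)
		free(tags[i]);
	return 0;
}
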
@@ -2090,9 +2095,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 	 */
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_freeze_queue_start(q);
-	list_for_each_entry(q, &all_q_list, all_q_node)
+	list_for_each_entry(q, &all_q_list, all_q_node) {
 		blk_mq_freeze_queue_wait(q);
 
+		/*
+		 * The timeout handler must not touch the hw queue during
+		 * reinitialization.
+		 */
+		del_timer_sync(&q->timeout);
+	}
+
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_queue_reinit(q);
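
The del_timer_sync() added above guarantees that no timeout handler is still running, and none can start, while the hardware queues are being reinitialized. The same stop-then-wait discipline in a userspace sketch, using a thread as a stand-in for the kernel timer (the pthread calls are real API, the rest is illustrative):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int timer_enabled = 1;
static int hw_state = 1;	/* state the handler must never see torn */

static void *timeout_handler(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		if (!timer_enabled) {
			pthread_mutex_unlock(&lock);
			return NULL;
		}
		printf("timeout scan, hw_state=%d\n", hw_state);
		pthread_mutex_unlock(&lock);
		usleep(1000);
	}
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, timeout_handler, NULL);
	usleep(3000);		/* let it tick a few times */

	/* "del_timer_sync()": disable the handler and wait for any
	 * running instance to finish before touching hw_state */
	pthread_mutex_lock(&lock);
	timer_enabled = 0;
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);

	hw_state = 2;		/* safe: no handler can observe this */
	printf("reinitialized\n");
	return 0;
}
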
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index faaf36ade7eb..2b8fd302f677 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -522,8 +522,6 @@ static void blk_release_queue(struct kobject *kobj)
 
 	blk_trace_shutdown(q);
 
-	bdi_destroy(&q->backing_dev_info);
-
 	ida_simple_remove(&blk_queue_ida, q->id);
 	call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
diff --git a/block/bounce.c b/block/bounce.c
index ab21ba203d5c..ed9dd8067120 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -221,8 +221,8 @@ bounce:
 		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
 			continue;
 
-		inc_zone_page_state(to->bv_page, NR_BOUNCE);
 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
+		inc_zone_page_state(to->bv_page, NR_BOUNCE);
 
 		if (rw == WRITE) {
 			char *vto, *vfrom;
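
The bounce.c swap makes the NR_BOUNCE accounting use the freshly allocated bounce page rather than the page it replaces. Since the counter is per zone and the two pages can sit in different zones, the old order could bump one zone's counter while the page actually consumed lives in another. A toy illustration; struct page and the counter array are stand-ins:

#include <stdio.h>

struct page { int zone; };

static int nr_bounce[2];	/* per-zone NR_BOUNCE stand-in */

static void inc_zone_page_state(struct page *p)
{
	nr_bounce[p->zone]++;
}

int main(void)
{
	struct page original = { .zone = 0 };
	struct page bounce   = { .zone = 1 };
	struct page *bv_page = &original;

	/* Buggy order: incrementing here would credit zone 0, even
	 * though the page actually consumed lives in zone 1. */
	bv_page = &bounce;		/* mempool_alloc() stand-in */
	inc_zone_page_state(bv_page);	/* accounts the right zone */

	printf("zone0=%d zone1=%d\n", nr_bounce[0], nr_bounce[1]);
	return 0;
}
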
diff --git a/block/elevator.c b/block/elevator.c
index 59794d0d38e3..8985038f398c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -157,7 +157,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
 
 	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
 	if (unlikely(!eq))
-		goto err;
+		return NULL;
 
 	eq->type = e;
 	kobject_init(&eq->kobj, &elv_ktype);
@@ -165,10 +165,6 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
 	hash_init(eq->hash);
 
 	return eq;
-
-err:
-	kfree(eq);
-	elevator_put(e);
-	return NULL;
 }
 EXPORT_SYMBOL(elevator_alloc);
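
In the removed error path, eq is always NULL when the goto is taken, so the kfree(eq) was a no-op, and elevator_put(e) dropped a reference that, judging by this fix, the caller also releases on failure, a double put. A small sketch of why returning NULL and leaving the reference to the caller is the safer shape (illustrative refcounting, not the elevator API):

#include <stdio.h>
#include <stdlib.h>

struct elv_type { int refcount; };

static void type_put(struct elv_type *e)
{
	if (--e->refcount < 0)
		fprintf(stderr, "BUG: reference count underflow\n");
}

/* On failure, return NULL and do NOT drop the caller's reference;
 * dropping it both here and in the caller would underflow. */
static void *alloc_queue(struct elv_type *e, int simulate_failure)
{
	void *eq = simulate_failure ? NULL : calloc(1, 16);

	(void)e;	/* real code would stash e in the new object */
	if (!eq)
		return NULL;
	return eq;
}

int main(void)
{
	struct elv_type e = { .refcount = 1 };

	if (!alloc_queue(&e, 1))
		type_put(&e);	/* the single put on the error path */

	printf("refcount=%d\n", e.refcount);
	return 0;
}
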