Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  29
1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 32d11305d51b..355db0abe44b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4972,12 +4972,13 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
  * Switch back to the elevator type stored in the xarray.
  */
 static void blk_mq_elv_switch_back(struct request_queue *q,
-		struct xarray *elv_tbl)
+		struct xarray *elv_tbl, struct xarray *et_tbl)
 {
 	struct elevator_type *e = xa_load(elv_tbl, q->id);
+	struct elevator_tags *t = xa_load(et_tbl, q->id);
 
 	/* The elv_update_nr_hw_queues unfreezes the queue. */
-	elv_update_nr_hw_queues(q, e);
+	elv_update_nr_hw_queues(q, e, t);
 
 	/* Drop the reference acquired in blk_mq_elv_switch_none. */
 	if (e)
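
The switch-back path above reloads per-queue state that was stashed earlier in xarrays keyed by q->id: the previous elevator type in elv_tbl and the pre-allocated scheduler tags in et_tbl. A minimal sketch of that stash/reload pattern, assuming hypothetical stash_state()/restore_state() helpers (these are not kernel functions):

#include <linux/xarray.h>
#include <linux/gfp.h>

/*
 * Hedged illustration: park a per-queue pointer in an xarray keyed by the
 * queue id, then pull it back out on the switch-back side.
 */
static int stash_state(struct xarray *tbl, unsigned long qid, void *state)
{
	/* xa_store() returns the displaced entry or an xa_err() pointer. */
	return xa_err(xa_store(tbl, qid, state, GFP_KERNEL));
}

static void *restore_state(struct xarray *tbl, unsigned long qid)
{
	return xa_load(tbl, qid);	/* NULL if nothing was stashed */
}

__blk_mq_update_nr_hw_queues() brackets the whole update with xa_init() and xa_destroy() on both tables, as the later hunks show.
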
@@ -5029,7 +5030,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	int prev_nr_hw_queues = set->nr_hw_queues;
 	unsigned int memflags;
 	int i;
-	struct xarray elv_tbl;
+	struct xarray elv_tbl, et_tbl;
+	bool queues_frozen = false;
 
 	lockdep_assert_held(&set->tag_list_lock);
@@ -5042,6 +5044,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	memflags = memalloc_noio_save();
 
+	xa_init(&et_tbl);
+	if (blk_mq_alloc_sched_tags_batch(&et_tbl, set, nr_hw_queues) < 0)
+		goto out_memalloc_restore;
+
 	xa_init(&elv_tbl);
 
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
@@ -5049,9 +5055,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		blk_mq_sysfs_unregister_hctxs(q);
 	}
 
-	list_for_each_entry(q, &set->tag_list, tag_set_list)
-		blk_mq_freeze_queue_nomemsave(q);
-
 	/*
 	 * Switch IO scheduler to 'none', cleaning up the data associated
 	 * with the previous scheduler. We will switch back once we are done
@@ -5061,6 +5064,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		if (blk_mq_elv_switch_none(q, &elv_tbl))
 			goto switch_back;
 
+	list_for_each_entry(q, &set->tag_list, tag_set_list)
+		blk_mq_freeze_queue_nomemsave(q);
+	queues_frozen = true;
 
 	if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
 		goto switch_back;
@@ -5084,8 +5090,12 @@ fallback:
 	}
 switch_back:
 	/* The blk_mq_elv_switch_back unfreezes queue for us. */
-	list_for_each_entry(q, &set->tag_list, tag_set_list)
-		blk_mq_elv_switch_back(q, &elv_tbl);
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		/* switch_back expects queue to be frozen */
+		if (!queues_frozen)
+			blk_mq_freeze_queue_nomemsave(q);
+		blk_mq_elv_switch_back(q, &elv_tbl, &et_tbl);
+	}
 
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_sysfs_register_hctxs(q);
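
The queues_frozen flag added earlier exists for this unwind path: queues are frozen only after every queue has switched its scheduler to 'none', so an early failure reaches switch_back with unfrozen queues, while blk_mq_elv_switch_back() (via elv_update_nr_hw_queues()) expects frozen queues and unfreezes them itself. A plain C sketch of that "progress flag plus shared unwind label" shape, with hypothetical freeze_all()/switch_back() stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the freeze and switch-back steps. */
static void freeze_all(void)  { puts("freeze queues"); }
static void switch_back(void) { puts("switch elevators back (unfreezes)"); }

static int update(bool fail_before_freeze)
{
	bool queues_frozen = false;
	int ret = 0;

	if (fail_before_freeze) {	/* e.g. switching to 'none' failed */
		ret = -1;
		goto switch_back_label;
	}

	freeze_all();
	queues_frozen = true;

	/* ... tag-set reallocation that may also fail ... */

switch_back_label:
	/* switch_back() expects frozen queues, so freeze now if we never did. */
	if (!queues_frozen)
		freeze_all();
	switch_back();
	return ret;
}

int main(void)
{
	update(true);	/* early failure: unwind freezes before switching back */
	update(false);	/* success path: already frozen */
	return 0;
}
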
@@ -5096,7 +5106,8 @@ switch_back:
 	}
 
 	xa_destroy(&elv_tbl);
-
+	xa_destroy(&et_tbl);
+out_memalloc_restore:
 	memalloc_noio_restore(memflags);
 
 	/* Free the excess tags when nr_hw_queues shrink. */
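
The out_memalloc_restore label is safe as an early exit because blk_mq_alloc_sched_tags_batch() runs right after memalloc_noio_save(), before any queue has been unregistered or frozen, so a failure there only needs to restore the allocation flags. A hedged sketch of that bracket, with do_allocations()/do_update() as hypothetical stand-ins:

#include <linux/sched/mm.h>

int do_allocations(void);	/* hypothetical step that may fail early */
int do_update(void);		/* hypothetical step with its own teardown */

static int run_update(void)
{
	unsigned int memflags;
	int ret;

	/* Mark the task NOIO so reclaim from these allocations cannot issue I/O. */
	memflags = memalloc_noio_save();

	ret = do_allocations();
	if (ret < 0)
		goto out_memalloc_restore;	/* nothing else to unwind yet */

	ret = do_update();

out_memalloc_restore:
	memalloc_noio_restore(memflags);
	return ret;
}
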