| author | Dongli Zhang <dongli.zhang@oracle.com> | 2019-02-27 16:35:01 +0300 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2019-02-28 23:57:32 +0300 |
| commit | 7d76f8562f4c42a5515741375790843fe4b8df83 (patch) | |
| tree | 818e650939f1436e2e335e61d3ce5324e89c6454 /block | |
| parent | 5b88a17cfdeba75e0092bab2c79aaf7d9e7db482 (diff) | |
| download | linux-7d76f8562f4c42a5515741375790843fe4b8df83.tar.xz | |
blk-mq: use HCTX_TYPE_DEFAULT but not 0 to index blk_mq_tag_set->map
Replace set->map[0] with set->map[HCTX_TYPE_DEFAULT] to avoid hardcoding.
Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
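For context, blk_mq_tag_set->map[] is indexed by enum hctx_type, and HCTX_TYPE_DEFAULT is that enum's first member, so it evaluates to 0 and the substitution does not change behavior. Below is a sketch of the relevant definitions, paraphrased from include/linux/blk-mq.h as of this era of the tree; it is not part of this patch:

```c
/* Paraphrased from include/linux/blk-mq.h around v5.0; not part of this patch. */
struct blk_mq_queue_map {
	unsigned int *mq_map;		/* per-CPU index into the hw queues */
	unsigned int nr_queues;
	unsigned int queue_offset;
};

enum hctx_type {
	HCTX_TYPE_DEFAULT,	/* all I/O not otherwise accounted for */
	HCTX_TYPE_READ,		/* just for READ I/O */
	HCTX_TYPE_POLL,		/* polled I/O of any kind */

	HCTX_MAX_TYPES,
};

struct blk_mq_tag_set {
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;	/* nr entries in map[] */
	/* ... */
};
```

Using the enum name makes each call site explicit about which per-type map is being consulted and keeps the code robust should the ordering of map[] ever change.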
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index fa508ee31742..fa024bce2b38 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2061,7 +2061,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
 	struct blk_mq_tags *tags;
 	int node;
 
-	node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
+	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
 	if (node == NUMA_NO_NODE)
 		node = set->numa_node;
 
@@ -2117,7 +2117,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 	size_t rq_size, left;
 	int node;
 
-	node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
+	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
 	if (node == NUMA_NO_NODE)
 		node = set->numa_node;
 
@@ -2416,7 +2416,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
 	for_each_possible_cpu(i) {
-		hctx_idx = set->map[0].mq_map[i];
+		hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i];
 		/* unmapped hw queue can be remapped after CPU topo changed */
 		if (!set->tags[hctx_idx] &&
 		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2426,7 +2426,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 			 * case, remap the current ctx to hctx[0] which
 			 * is guaranteed to always have tags allocated
 			 */
-			set->map[0].mq_map[i] = 0;
+			set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0;
 		}
 
 		ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -2733,7 +2733,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		int node;
 		struct blk_mq_hw_ctx *hctx;
 
-		node = blk_mq_hw_queue_to_node(&set->map[0], i);
+		node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
 		/*
 		 * If the hw queue has been mapped to another numa node,
 		 * we need to realloc the hctx. If allocation fails, fallback
@@ -2964,7 +2964,7 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 		return set->ops->map_queues(set);
 	} else {
 		BUG_ON(set->nr_maps > 1);
-		return blk_mq_map_queues(&set->map[0]);
+		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 	}
 }
 
@@ -3234,7 +3234,7 @@ fallback:
 			pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
 					nr_hw_queues, prev_nr_hw_queues);
 			set->nr_hw_queues = prev_nr_hw_queues;
-			blk_mq_map_queues(&set->map[0]);
+			blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 			goto fallback;
 		}
 		blk_mq_map_swqueue(q);
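As a usage illustration of the pattern this patch standardizes, a driver that registers more than one queue map fills set->map[HCTX_TYPE_DEFAULT] first and then any type-specific maps from its ->map_queues() callback. The sketch below is hypothetical (mydrv_map_queues is an invented name, not code from this commit); note that blk_mq_map_queues() returned int at this point in the tree:

```c
/*
 * Hypothetical ->map_queues() callback; mydrv_map_queues is an invented
 * name used only for illustration, not code from this commit.
 */
static int mydrv_map_queues(struct blk_mq_tag_set *set)
{
	int ret;

	/* Every tag set has a default map; index it by name, not by 0. */
	ret = blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	if (ret)
		return ret;

	/* Extra maps (e.g. a poll map) exist only when nr_maps says so. */
	if (set->nr_maps > HCTX_TYPE_POLL)
		ret = blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);

	return ret;
}
```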