author    Omar Sandoval <osandov@fb.com>  2017-03-21 18:56:06 +0300
committer Jens Axboe <axboe@fb.com>       2017-03-21 19:03:08 +0300
commit    fa2e39cb9ee78f440d99a1bcfa47462c48a6fc11 (patch)
tree      f52d01dd11fabdbe05388ce755af699d7c38bf80 /block/blk-mq.c
parent    0315b159085621d2ff72dbf69ca6fb4a5b32bae2 (diff)
download  linux-fa2e39cb9ee78f440d99a1bcfa47462c48a6fc11.tar.xz
blk-stat: use READ and WRITE instead of BLK_STAT_{READ,WRITE}
The stats buckets will become generic soon, so make the existing users use the common READ and WRITE definitions instead of the ones internal to blk-stat.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
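
For context, a minimal standalone sketch of why this substitution works: the generic READ and WRITE indices are plain 0 and 1, so they can index a two-entry per-direction stats array just as the old blk-stat-private names did. The struct name and values below are illustrative assumptions, not kernel code.

#include <stdio.h>

#define READ  0   /* mirrors the kernel's generic READ index */
#define WRITE 1   /* mirrors the kernel's generic WRITE index */

/* Illustrative stand-in for a per-direction latency bucket. */
struct demo_rq_stat {
	unsigned long long mean;       /* mean completion latency, ns */
	unsigned long long nr_samples; /* samples accumulated in this bucket */
};

int main(void)
{
	/* One bucket per direction, indexed by READ/WRITE directly. */
	struct demo_rq_stat stat[2] = {
		[READ]  = { .mean = 8000,  .nr_samples = 128 },
		[WRITE] = { .mean = 20000, .nr_samples = 64  },
	};

	printf("read mean %llu ns, write mean %llu ns\n",
	       stat[READ].mean, stat[WRITE].mean);
	return 0;
}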
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 534f49a90e3a..559e5363bb2c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2040,8 +2040,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
spin_lock_init(&__ctx->lock);
INIT_LIST_HEAD(&__ctx->rq_list);
__ctx->queue = q;
- blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
- blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
+ blk_stat_init(&__ctx->stat[READ]);
+ blk_stat_init(&__ctx->stat[WRITE]);
/* If the cpu isn't online, the cpu is mapped to first hctx */
if (!cpu_online(i))
@@ -2769,10 +2769,10 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
* important on devices where the completion latencies are longer
* than ~10 usec.
*/
- if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
- ret = (stat[BLK_STAT_READ].mean + 1) / 2;
- else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
- ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
+ if (req_op(rq) == REQ_OP_READ && stat[READ].nr_samples)
+ ret = (stat[READ].mean + 1) / 2;
+ else if (req_op(rq) == REQ_OP_WRITE && stat[WRITE].nr_samples)
+ ret = (stat[WRITE].mean + 1) / 2;
return ret;
}
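
As a rough illustration of what the second hunk computes: the hybrid-poll sleep time is chosen as half the observed mean completion latency for the request's direction, rounded up, and zero when no samples exist yet. The helper below is a hedged standalone sketch with assumed names (poll_nsecs_sketch, demo_stat), not the kernel's API.

/* Illustrative per-direction stats, mirroring the sketch above. */
struct demo_stat {
	unsigned long long mean;       /* mean completion latency, ns */
	unsigned long long nr_samples; /* samples accumulated so far */
};

/*
 * Assumed helper name; dir is READ (0) or WRITE (1). Returns half the
 * mean latency, rounded up, or 0 when there is no data to go on.
 */
static unsigned long long poll_nsecs_sketch(const struct demo_stat *stat, int dir)
{
	if (stat[dir].nr_samples)
		return (stat[dir].mean + 1) / 2;
	return 0;
}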