author     Mike Christie <mchristi@redhat.com>    2016-06-05 22:31:47 +0300
committer  Jens Axboe <axboe@fb.com>              2016-06-07 22:41:38 +0300
commit     c8d93247f1d0cf478222a7f4fc37d453d6193d04 (patch)
tree       c39ae14478ad516b0a01a975181833f30a483f63
parent     511116669346a0029b7e54eaaa8e5a7029f89ab3 (diff)
bcache: use op_is_write instead of checking for REQ_WRITE
We currently set REQ_WRITE/WRITE for all non-READ IOs like discard, flush, writesame, etc. In the next patches, where we no longer set up the op as a bitmap, we will not be able to detect an operation's direction (e.g. writesame) by testing whether REQ_WRITE is set. This has bcache use the op_is_write() helper, which will do the right thing.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
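For reference, op_is_write() reports whether an op's data direction is toward the device. The snippet below is a minimal sketch of the semantics this patch relies on (every op other than a plain read counts as a write, matching the "all non-READ IOs" wording above); it is an illustration, not the kernel's exact definition, which lives in the block layer headers and has varied across releases:

	/* Sketch only: the real op_is_write() may be implemented differently. */
	static inline bool op_is_write(unsigned int op)
	{
		/* READ is the only op that moves data from the device;
		 * write, discard, flush, write same, etc. all count as
		 * writes for congestion and caching decisions. */
		return op != REQ_OP_READ;
	}

Centralizing the check in a helper means callers like bcache no longer care whether the direction is encoded as a REQ_WRITE flag bit or as a separate op field, which is what the follow-on patches change.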
-rw-r--r--  drivers/md/bcache/io.c      | 2 +-
-rw-r--r--  drivers/md/bcache/request.c | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 86a0bb87124e..fd885cc2afad 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -111,7 +111,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
 	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct cache *ca = PTR_CACHE(c, &b->key, 0);
 
-	unsigned threshold = bio->bi_rw & REQ_WRITE
+	unsigned threshold = op_is_write(bio_op(bio))
 		? c->congested_write_threshold_us
 		: c->congested_read_threshold_us;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 25fa8445bb24..6b85a23ec92a 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -383,7 +383,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
 	if (mode == CACHE_MODE_NONE ||
 	    (mode == CACHE_MODE_WRITEAROUND &&
-	     (bio->bi_rw & REQ_WRITE)))
+	     op_is_write(bio_op(bio))))
 		goto skip;
 
 	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
@@ -404,7 +404,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
 	if (!congested &&
 	    mode == CACHE_MODE_WRITEBACK &&
-	    (bio->bi_rw & REQ_WRITE) &&
+	    op_is_write(bio_op(bio)) &&
 	    (bio->bi_rw & REQ_SYNC))
 		goto rescale;
@@ -657,7 +657,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->cache_miss		= NULL;
 	s->d			= d;
 	s->recoverable		= 1;
-	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
+	s->write		= op_is_write(bio_op(bio));
 	s->read_dirty_data	= 0;
 	s->start_time		= jiffies;