author     Linus Torvalds <torvalds@linux-foundation.org>   2016-07-27 01:03:07 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-07-27 01:03:07 +0300
commit     d05d7f40791ccbb6e543cc5dd6a6aa08fc71d635 (patch)
tree       dc0039fe490a41a70de10d58fe8e6136db46463a /drivers
parent     75a442efb1ca613f8d1cc71a32c2c9b0aefae4a5 (diff)
parent     17007f3994cdb4643355c73f54f0adad006cf59e (diff)
download   linux-d05d7f40791ccbb6e543cc5dd6a6aa08fc71d635.tar.xz
Merge branch 'for-4.8/core' of git://git.kernel.dk/linux-block
Pull core block updates from Jens Axboe:
- the big change is the cleanup from Mike Christie of our uses of
  command types and modifier flags; this is what will throw some merge
  conflicts (a short before/after sketch of the new interface follows
  this list)
- a regression fix for the above in btrfs, from Vincent
- a follow-up to the above: better packing of struct request, from
  Christoph
- a 2038 fix for blktrace from Arnd
- a few trivial/spelling fixes from Bart Van Assche
- a front merge check fix from Damien; the bug could cause issues on
  SMR drives
- Atari partition fix from Gabriel
- convert cfq to highres timers, since jiffies isn't granular enough
for some devices these days. From Jan and Jeff
- CFQ priority boost fix for idle classes, from me
- cleanup series from Ming, improving our bio/bvec iteration
- a direct issue fix for blk-mq from Omar
- a fix so plug merging consults the IO scheduler, as we already do
  for other types of merges. From Tahsin
- expose DAX support internally and through sysfs. From Toshi and
  Yigal (a driver-side sketch follows the commit list below)
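For out-of-tree drivers needing the same conversion, here is a minimal
before/after sketch distilled from the driver hunks in this merge. The
old forms appear in the comments; example_submit() and the pr_debug()
messages are illustrative placeholders, not code from any driver below.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Old style mixed the direction and modifier flags into one value:
 *     bio->bi_rw = WRITE | REQ_SYNC;
 *     submit_bio(WRITE, bio);
 * After this series the operation (REQ_OP_*) and the modifier flags are
 * set separately, and submit_bio() takes only the bio. */
static void example_submit(struct bio *bio, struct request *rq)
{
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
	submit_bio(bio);

	/* was: if (rq->cmd_flags & REQ_DISCARD) */
	if (req_op(rq) == REQ_OP_DISCARD)
		pr_debug("discard request\n");

	/* was: if (rq->cmd_flags & REQ_WRITE) */
	if (op_is_write(req_op(rq)))
		pr_debug("write request\n");
}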
* 'for-4.8/core' of git://git.kernel.dk/linux-block: (76 commits)
block: Fix front merge check
block: do not merge requests without consulting with io scheduler
block: Fix spelling in a source code comment
block: expose QUEUE_FLAG_DAX in sysfs
block: add QUEUE_FLAG_DAX for devices to advertise their DAX support
Btrfs: fix comparison in __btrfs_map_block()
block: atari: Return early for unsupported sector size
Doc: block: Fix a typo in queue-sysfs.txt
cfq-iosched: Charge at least 1 jiffie instead of 1 ns
cfq-iosched: Fix regression in bonnie++ rewrite performance
cfq-iosched: Convert slice_resid from u64 to s64
block: Convert fifo_time from ulong to u64
blktrace: avoid using timespec
block/blk-cgroup.c: Declare local symbols static
block/bio-integrity.c: Add #include "blk.h"
block/partition-generic.c: Remove a set-but-not-used variable
block: bio: kill BIO_MAX_SIZE
cfq-iosched: temporarily boost queue priority for idle classes
block: drbd: avoid to use BIO_MAX_SIZE
block: bio: remove BIO_MAX_SECTORS
...
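The two QUEUE_FLAG_DAX commits above let a driver advertise DAX support
on its request queue, which is then exposed through the queue's sysfs
"dax" attribute. A minimal driver-side sketch, modelled on the brd.c
hunk in this diff; the helper name is hypothetical and
CONFIG_BLK_DEV_RAM_DAX is brd's own config symbol:

#include <linux/blkdev.h>

static void example_advertise_dax(struct request_queue *q)
{
#ifdef CONFIG_BLK_DEV_RAM_DAX
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
#endif
}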
Diffstat (limited to 'drivers')
77 files changed, 518 insertions, 436 deletions
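Several device-mapper hunks below split dm_io_request's single bi_rw
field into bi_op and bi_op_flags. A caller-side sketch of an empty
flush using the new fields, modelled on the dm-bufio hunk; the function
name is illustrative and error handling is omitted:

#include <linux/fs.h>
#include <linux/dm-io.h>

static int example_issue_flush(struct dm_io_client *client,
			       struct dm_io_region *where)
{
	struct dm_io_request io_req = {
		.bi_op		= REQ_OP_WRITE,	/* was: .bi_rw = WRITE_FLUSH */
		.bi_op_flags	= WRITE_FLUSH,
		.mem.type	= DM_IO_KMEM,
		.mem.ptr.addr	= NULL,
		.notify.fn	= NULL,		/* NULL means synchronous dm_io() */
		.client		= client,
	};

	/* where->count is 0 for an empty flush */
	return dm_io(&io_req, 1, where, NULL);
}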
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 2bdb5dab922b..e207b33e4ce9 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1190,7 +1190,7 @@ static int atapi_drain_needed(struct request *rq) if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC)) return 0; - if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE)) + if (!blk_rq_bytes(rq) || op_is_write(req_op(rq))) return 0; return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC; diff --git a/drivers/block/brd.c b/drivers/block/brd.c index c04bd9bc39fd..dd96a935fba0 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -339,7 +339,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) goto io_error; - if (unlikely(bio->bi_rw & REQ_DISCARD)) { + if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) { if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) || bio->bi_iter.bi_size & ~PAGE_MASK) goto io_error; @@ -509,7 +509,9 @@ static struct brd_device *brd_alloc(int i) blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX); brd->brd_queue->limits.discard_zeroes_data = 1; queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue); - +#ifdef CONFIG_BLK_DEV_RAM_DAX + queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue); +#endif disk = brd->brd_disk = alloc_disk(max_part); if (!disk) goto out_free_queue; diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 10459a145062..d524973f94b3 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -137,19 +137,19 @@ void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_b static int _drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, - sector_t sector, int rw) + sector_t sector, int op) { struct bio *bio; /* we do all our meta data IO in aligned 4k blocks. */ const int size = 4096; - int err; + int err, op_flags = 0; device->md_io.done = 0; device->md_io.error = -ENODEV; - if ((rw & WRITE) && !test_bit(MD_NO_FUA, &device->flags)) - rw |= REQ_FUA | REQ_FLUSH; - rw |= REQ_SYNC | REQ_NOIDLE; + if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags)) + op_flags |= REQ_FUA | REQ_PREFLUSH; + op_flags |= REQ_SYNC | REQ_NOIDLE; bio = bio_alloc_drbd(GFP_NOIO); bio->bi_bdev = bdev->md_bdev; @@ -159,9 +159,9 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, goto out; bio->bi_private = device; bio->bi_end_io = drbd_md_endio; - bio->bi_rw = rw; + bio_set_op_attrs(bio, op, op_flags); - if (!(rw & WRITE) && device->state.disk == D_DISKLESS && device->ldev == NULL) + if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL) /* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */ ; else if (!get_ldev_if_state(device, D_ATTACHING)) { @@ -174,10 +174,10 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, bio_get(bio); /* one bio_put() is in the completion handler */ atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */ device->md_io.submit_jif = jiffies; - if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) + if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? 
DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) bio_io_error(bio); else - submit_bio(rw, bio); + submit_bio(bio); wait_until_done_or_force_detached(device, bdev, &device->md_io.done); if (!bio->bi_error) err = device->md_io.error; @@ -188,7 +188,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, } int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, - sector_t sector, int rw) + sector_t sector, int op) { int err; D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1); @@ -197,19 +197,21 @@ int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bd dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n", current->comm, current->pid, __func__, - (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", + (unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ", (void*)_RET_IP_ ); if (sector < drbd_md_first_sector(bdev) || sector + 7 > drbd_md_last_sector(bdev)) drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n", current->comm, current->pid, __func__, - (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ"); + (unsigned long long)sector, + (op == REQ_OP_WRITE) ? "WRITE" : "READ"); - err = _drbd_md_sync_page_io(device, bdev, sector, rw); + err = _drbd_md_sync_page_io(device, bdev, sector, op); if (err) { drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n", - (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err); + (unsigned long long)sector, + (op == REQ_OP_WRITE) ? "WRITE" : "READ", err); } return err; } @@ -845,7 +847,7 @@ int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size, unsigned long count = 0; sector_t esector, nr_sectors; - /* This would be an empty REQ_FLUSH, be silent. */ + /* This would be an empty REQ_PREFLUSH, be silent. */ if ((mode == SET_OUT_OF_SYNC) && size == 0) return 0; diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 92d6fc020a65..e5d89f623b90 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -980,7 +980,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho struct drbd_bitmap *b = device->bitmap; struct page *page; unsigned int len; - unsigned int rw = (ctx->flags & BM_AIO_READ) ? READ : WRITE; + unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE; sector_t on_disk_sector = device->ldev->md.md_offset + device->ldev->md.bm_offset; @@ -1011,12 +1011,12 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho bio_add_page(bio, page, len, 0); bio->bi_private = ctx; bio->bi_end_io = drbd_bm_endio; + bio_set_op_attrs(bio, op, 0); - if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { - bio->bi_rw |= rw; + if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { bio_io_error(bio); } else { - submit_bio(rw, bio); + submit_bio(bio); /* this should not count as user activity and cause the * resync to throttle -- see drbd_rs_should_slow_down(). */ atomic_add(len >> 9, &device->rs_sect_ev); diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 7a1cf7eaa71d..a64c645b4184 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1327,14 +1327,14 @@ struct bm_extent { #endif #endif -/* BIO_MAX_SIZE is 256 * PAGE_SIZE, +/* Estimate max bio size as 256 * PAGE_SIZE, * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte. 
* Since we may live in a mixed-platform cluster, * we limit us to a platform agnostic constant here for now. * A followup commit may allow even bigger BIO sizes, * once we thought that through. */ #define DRBD_MAX_BIO_SIZE (1U << 20) -#if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE +#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT) #error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE #endif #define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */ @@ -1507,7 +1507,7 @@ extern int drbd_resync_finished(struct drbd_device *device); extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent); extern void drbd_md_put_buffer(struct drbd_device *device); extern int drbd_md_sync_page_io(struct drbd_device *device, - struct drbd_backing_dev *bdev, sector_t sector, int rw); + struct drbd_backing_dev *bdev, sector_t sector, int op); extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int); extern void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev, unsigned int *done); @@ -1557,7 +1557,7 @@ extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector bool throttle_if_app_is_waiting); extern int drbd_submit_peer_request(struct drbd_device *, struct drbd_peer_request *, const unsigned, - const int); + const unsigned, const int); extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *); extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64, sector_t, unsigned int, diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 2ba1494b2799..2b37744db0fa 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1603,15 +1603,16 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device, return 0; } -static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long bi_rw) +static u32 bio_flags_to_wire(struct drbd_connection *connection, + struct bio *bio) { if (connection->agreed_pro_version >= 95) - return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) | - (bi_rw & REQ_FUA ? DP_FUA : 0) | - (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) | - (bi_rw & REQ_DISCARD ? DP_DISCARD : 0); + return (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) | + (bio->bi_rw & REQ_FUA ? DP_FUA : 0) | + (bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) | + (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0); else - return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0; + return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0; } /* Used to send write or TRIM aka REQ_DISCARD requests @@ -1636,7 +1637,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request * p->sector = cpu_to_be64(req->i.sector); p->block_id = (unsigned long)req; p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq)); - dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw); + dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio); if (device->state.conn >= C_SYNC_SOURCE && device->state.conn <= C_PAUSED_SYNC_T) dp_flags |= DP_MAY_SET_IN_SYNC; @@ -3061,7 +3062,7 @@ void drbd_md_write(struct drbd_device *device, void *b) D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset); sector = device->ldev->md.md_offset; - if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) { + if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) { /* this was a try anyways ... 
*/ drbd_err(device, "meta data update failed!\n"); drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR); @@ -3263,7 +3264,8 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev) * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */ bdev->md.md_size_sect = 8; - if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) { + if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, + REQ_OP_READ)) { /* NOTE: can't do normal error processing here as this is called BEFORE disk is attached */ drbd_err(device, "Error while reading metadata.\n"); diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h index ef9245363dcc..129f8c76c9b1 100644 --- a/drivers/block/drbd/drbd_protocol.h +++ b/drivers/block/drbd/drbd_protocol.h @@ -112,7 +112,7 @@ struct p_header100 { #define DP_MAY_SET_IN_SYNC 4 #define DP_UNPLUG 8 /* not used anymore */ #define DP_FUA 16 /* equals REQ_FUA */ -#define DP_FLUSH 32 /* equals REQ_FLUSH */ +#define DP_FLUSH 32 /* equals REQ_PREFLUSH */ #define DP_DISCARD 64 /* equals REQ_DISCARD */ #define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */ #define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */ diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 050aaa1c0350..1ee002352ea2 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1398,7 +1398,8 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin /* TODO allocate from our own bio_set. */ int drbd_submit_peer_request(struct drbd_device *device, struct drbd_peer_request *peer_req, - const unsigned rw, const int fault_type) + const unsigned op, const unsigned op_flags, + const int fault_type) { struct bio *bios = NULL; struct bio *bio; @@ -1450,7 +1451,7 @@ next_bio: /* > peer_req->i.sector, unless this is the first bio */ bio->bi_iter.bi_sector = sector; bio->bi_bdev = device->ldev->backing_bdev; - bio->bi_rw = rw; + bio_set_op_attrs(bio, op, op_flags); bio->bi_private = peer_req; bio->bi_end_io = drbd_peer_request_endio; @@ -1458,7 +1459,7 @@ next_bio: bios = bio; ++n_bios; - if (rw & REQ_DISCARD) { + if (op == REQ_OP_DISCARD) { bio->bi_iter.bi_size = data_size; goto submit; } @@ -1830,7 +1831,8 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto spin_unlock_irq(&device->resource->req_lock); atomic_add(pi->size >> 9, &device->rs_sect_ev); - if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0) + if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0, + DRBD_FAULT_RS_WR) == 0) return 0; /* don't care for the reason here */ @@ -2152,12 +2154,19 @@ static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, co /* see also bio_flags_to_wire() * DRBD_REQ_*, because we need to semantically map the flags to data packet * flags and back. We may replicate to other kernel versions. */ -static unsigned long wire_flags_to_bio(u32 dpf) +static unsigned long wire_flags_to_bio_flags(u32 dpf) { return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | (dpf & DP_FUA ? REQ_FUA : 0) | - (dpf & DP_FLUSH ? REQ_FLUSH : 0) | - (dpf & DP_DISCARD ? REQ_DISCARD : 0); + (dpf & DP_FLUSH ? 
REQ_PREFLUSH : 0); +} + +static unsigned long wire_flags_to_bio_op(u32 dpf) +{ + if (dpf & DP_DISCARD) + return REQ_OP_DISCARD; + else + return REQ_OP_WRITE; } static void fail_postponed_requests(struct drbd_device *device, sector_t sector, @@ -2303,7 +2312,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * struct drbd_peer_request *peer_req; struct p_data *p = pi->data; u32 peer_seq = be32_to_cpu(p->seq_num); - int rw = WRITE; + int op, op_flags; u32 dp_flags; int err, tp; @@ -2342,14 +2351,15 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * peer_req->flags |= EE_APPLICATION; dp_flags = be32_to_cpu(p->dp_flags); - rw |= wire_flags_to_bio(dp_flags); + op = wire_flags_to_bio_op(dp_flags); + op_flags = wire_flags_to_bio_flags(dp_flags); if (pi->cmd == P_TRIM) { struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev); peer_req->flags |= EE_IS_TRIM; if (!blk_queue_discard(q)) peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT; D_ASSERT(peer_device, peer_req->i.size > 0); - D_ASSERT(peer_device, rw & REQ_DISCARD); + D_ASSERT(peer_device, op == REQ_OP_DISCARD); D_ASSERT(peer_device, peer_req->pages == NULL); } else if (peer_req->pages == NULL) { D_ASSERT(device, peer_req->i.size == 0); @@ -2433,7 +2443,8 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * peer_req->flags |= EE_CALL_AL_COMPLETE_IO; } - err = drbd_submit_peer_request(device, peer_req, rw, DRBD_FAULT_DT_WR); + err = drbd_submit_peer_request(device, peer_req, op, op_flags, + DRBD_FAULT_DT_WR); if (!err) return 0; @@ -2723,7 +2734,8 @@ submit_for_resync: submit: update_receiver_timing_details(connection, drbd_submit_peer_request); inc_unacked(device); - if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0) + if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0, + fault_type) == 0) return 0; /* don't care for the reason here */ diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 2255dcfebd2b..eef6e9575b4e 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1132,7 +1132,7 @@ static int drbd_process_write_request(struct drbd_request *req) * replicating, in which case there is no point. */ if (unlikely(req->i.size == 0)) { /* The only size==0 bios we expect are empty flushes. */ - D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH); + D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH); if (remote) _req_mod(req, QUEUE_AS_DRBD_BARRIER); return remote; diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 4d87499f0d54..51fab978eb61 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -174,7 +174,7 @@ void drbd_peer_request_endio(struct bio *bio) struct drbd_peer_request *peer_req = bio->bi_private; struct drbd_device *device = peer_req->peer_device->device; int is_write = bio_data_dir(bio) == WRITE; - int is_discard = !!(bio->bi_rw & REQ_DISCARD); + int is_discard = !!(bio_op(bio) == REQ_OP_DISCARD); if (bio->bi_error && __ratelimit(&drbd_ratelimit_state)) drbd_warn(device, "%s: error=%d s=%llus\n", @@ -248,7 +248,7 @@ void drbd_request_endio(struct bio *bio) /* to avoid recursion in __req_mod */ if (unlikely(bio->bi_error)) { - if (bio->bi_rw & REQ_DISCARD) + if (bio_op(bio) == REQ_OP_DISCARD) what = (bio->bi_error == -EOPNOTSUPP) ? 
DISCARD_COMPLETED_NOTSUPP : DISCARD_COMPLETED_WITH_ERROR; @@ -397,7 +397,8 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, spin_unlock_irq(&device->resource->req_lock); atomic_add(size >> 9, &device->rs_sect_ev); - if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0) + if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0, + DRBD_FAULT_RS_RD) == 0) return 0; /* If it failed because of ENOMEM, retry should help. If it failed diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 84708a5f8c52..f9bfecd733a8 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3822,8 +3822,9 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive) bio.bi_flags |= (1 << BIO_QUIET); bio.bi_private = &cbdata; bio.bi_end_io = floppy_rb0_cb; + bio_set_op_attrs(&bio, REQ_OP_READ, 0); - submit_bio(READ, &bio); + submit_bio(&bio); process_fd_request(); init_completion(&cbdata.complete); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 1fa8cc235977..364d491d4bdd 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -447,7 +447,7 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq) static inline void handle_partial_read(struct loop_cmd *cmd, long bytes) { - if (bytes < 0 || (cmd->rq->cmd_flags & REQ_WRITE)) + if (bytes < 0 || op_is_write(req_op(cmd->rq))) return; if (unlikely(bytes < blk_rq_bytes(cmd->rq))) { @@ -541,10 +541,10 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq) pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset; - if (rq->cmd_flags & REQ_WRITE) { - if (rq->cmd_flags & REQ_FLUSH) + if (op_is_write(req_op(rq))) { + if (req_op(rq) == REQ_OP_FLUSH) ret = lo_req_flush(lo, rq); - else if (rq->cmd_flags & REQ_DISCARD) + else if (req_op(rq) == REQ_OP_DISCARD) ret = lo_discard(lo, rq, pos); else if (lo->transfer) ret = lo_write_transfer(lo, rq, pos); @@ -1659,8 +1659,8 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx, if (lo->lo_state != Lo_bound) return -EIO; - if (lo->use_dio && !(cmd->rq->cmd_flags & (REQ_FLUSH | - REQ_DISCARD))) + if (lo->use_dio && (req_op(cmd->rq) != REQ_OP_FLUSH || + req_op(cmd->rq) == REQ_OP_DISCARD)) cmd->use_aio = true; else cmd->use_aio = false; @@ -1672,7 +1672,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx, static void loop_handle_cmd(struct loop_cmd *cmd) { - const bool write = cmd->rq->cmd_flags & REQ_WRITE; + const bool write = op_is_write(req_op(cmd->rq)); struct loop_device *lo = cmd->rq->q->queuedata; int ret = 0; diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 6053e4659fa2..8e3e708cb9ee 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3765,7 +3765,7 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq) return -ENODATA; } - if (rq->cmd_flags & REQ_DISCARD) { + if (req_op(rq) == REQ_OP_DISCARD) { int err; err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq)); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 6a48ed41963f..6f55b262b5ce 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -282,9 +282,9 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req) if (req->cmd_type == REQ_TYPE_DRV_PRIV) type = NBD_CMD_DISC; - else if (req->cmd_flags & REQ_DISCARD) + else if (req_op(req) == REQ_OP_DISCARD) type = NBD_CMD_TRIM; - else if (req->cmd_flags & REQ_FLUSH) + else if (req_op(req) == REQ_OP_FLUSH) type = NBD_CMD_FLUSH; else if 
(rq_data_dir(req) == WRITE) type = NBD_CMD_WRITE; diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c index c2854a2bfdb0..92900f5f0b47 100644 --- a/drivers/block/osdblk.c +++ b/drivers/block/osdblk.c @@ -321,7 +321,7 @@ static void osdblk_rq_fn(struct request_queue *q) * driver-specific, etc. */ - do_flush = rq->cmd_flags & REQ_FLUSH; + do_flush = (req_op(rq) == REQ_OP_FLUSH); do_write = (rq_data_dir(rq) == WRITE); if (!do_flush) { /* osd_flush does not use a bio */ diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index d06c62eccdf0..9393bc730acf 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -1074,7 +1074,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) BUG(); atomic_inc(&pkt->io_wait); - bio->bi_rw = READ; + bio_set_op_attrs(bio, REQ_OP_READ, 0); pkt_queue_bio(pd, bio); frames_read++; } @@ -1336,7 +1336,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) /* Start the write request */ atomic_set(&pkt->io_wait, 1); - pkt->w_bio->bi_rw = WRITE; + bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0); pkt_queue_bio(pd, pkt->w_bio); } diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 4b7e405830d7..acb44529c05e 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -196,7 +196,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev, dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); while ((req = blk_fetch_request(q))) { - if (req->cmd_flags & REQ_FLUSH) { + if (req_op(req) == REQ_OP_FLUSH) { if (ps3disk_submit_flush_request(dev, req)) break; } else if (req->cmd_type == REQ_TYPE_FS) { @@ -256,7 +256,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data) return IRQ_HANDLED; } - if (req->cmd_flags & REQ_FLUSH) { + if (req_op(req) == REQ_OP_FLUSH) { read = 0; op = "flush"; } else { diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 81666a56415e..450662055d97 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3286,9 +3286,9 @@ static void rbd_queue_workfn(struct work_struct *work) goto err; } - if (rq->cmd_flags & REQ_DISCARD) + if (req_op(rq) == REQ_OP_DISCARD) op_type = OBJ_OP_DISCARD; - else if (rq->cmd_flags & REQ_WRITE) + else if (req_op(rq) == REQ_OP_WRITE) op_type = OBJ_OP_WRITE; else op_type = OBJ_OP_READ; diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c index cf8cd293abb5..5a20385f87d0 100644 --- a/drivers/block/rsxx/dma.c +++ b/drivers/block/rsxx/dma.c @@ -705,7 +705,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, dma_cnt[i] = 0; } - if (bio->bi_rw & REQ_DISCARD) { + if (bio_op(bio) == REQ_OP_DISCARD) { bv_len = bio->bi_iter.bi_size; while (bv_len > 0) { diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 910e065918af..5c07a23e2ada 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -597,7 +597,7 @@ static void skd_request_fn(struct request_queue *q) data_dir = rq_data_dir(req); io_flags = req->cmd_flags; - if (io_flags & REQ_FLUSH) + if (req_op(req) == REQ_OP_FLUSH) flush++; if (io_flags & REQ_FUA) diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 7939b9f87441..4b3ba74e9d22 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -462,7 +462,7 @@ static void process_page(unsigned long data) le32_to_cpu(desc->local_addr)>>9, le32_to_cpu(desc->transfer_size)); dump_dmastat(card, control); - } else if ((bio->bi_rw & REQ_WRITE) && + } else if (op_is_write(bio_op(bio)) && le32_to_cpu(desc->local_addr) >> 9 == card->init_size) 
{ card->init_size += le32_to_cpu(desc->transfer_size) >> 9; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 42758b52768c..18e4069dd24b 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -172,7 +172,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); vbr->req = req; - if (req->cmd_flags & REQ_FLUSH) { + if (req_op(req) == REQ_OP_FLUSH) { vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH); vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 4809c1501d7e..4a80ee752597 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -501,7 +501,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif, struct xen_vbd *vbd = &blkif->vbd; int rc = -EACCES; - if ((operation != READ) && vbd->readonly) + if ((operation != REQ_OP_READ) && vbd->readonly) goto out; if (likely(req->nr_sects)) { @@ -1014,7 +1014,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring, preq.sector_number = req->u.discard.sector_number; preq.nr_sects = req->u.discard.nr_sectors; - err = xen_vbd_translate(&preq, blkif, WRITE); + err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE); if (err) { pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n", preq.sector_number, @@ -1229,6 +1229,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, struct bio **biolist = pending_req->biolist; int i, nbio = 0; int operation; + int operation_flags = 0; struct blk_plug plug; bool drain = false; struct grant_page **pages = pending_req->segments; @@ -1247,17 +1248,19 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, switch (req_operation) { case BLKIF_OP_READ: ring->st_rd_req++; - operation = READ; + operation = REQ_OP_READ; break; case BLKIF_OP_WRITE: ring->st_wr_req++; - operation = WRITE_ODIRECT; + operation = REQ_OP_WRITE; + operation_flags = WRITE_ODIRECT; break; case BLKIF_OP_WRITE_BARRIER: drain = true; case BLKIF_OP_FLUSH_DISKCACHE: ring->st_f_req++; - operation = WRITE_FLUSH; + operation = REQ_OP_WRITE; + operation_flags = WRITE_FLUSH; break; default: operation = 0; /* make gcc happy */ @@ -1269,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, nseg = req->operation == BLKIF_OP_INDIRECT ? req->u.indirect.nr_segments : req->u.rw.nr_segments; - if (unlikely(nseg == 0 && operation != WRITE_FLUSH) || + if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) || unlikely((req->operation != BLKIF_OP_INDIRECT) && (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) || unlikely((req->operation == BLKIF_OP_INDIRECT) && @@ -1310,7 +1313,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) { pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n", - operation == READ ? "read" : "write", + operation == REQ_OP_READ ? 
"read" : "write", preq.sector_number, preq.sector_number + preq.nr_sects, ring->blkif->vbd.pdevice); @@ -1369,6 +1372,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; bio->bi_iter.bi_sector = preq.sector_number; + bio_set_op_attrs(bio, operation, operation_flags); } preq.sector_number += seg[i].nsec; @@ -1376,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, /* This will be hit if the operation was a flush or discard. */ if (!bio) { - BUG_ON(operation != WRITE_FLUSH); + BUG_ON(operation_flags != WRITE_FLUSH); bio = bio_alloc(GFP_KERNEL, 0); if (unlikely(bio == NULL)) @@ -1386,20 +1390,21 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, bio->bi_bdev = preq.bdev; bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; + bio_set_op_attrs(bio, operation, operation_flags); } atomic_set(&pending_req->pendcnt, nbio); blk_start_plug(&plug); for (i = 0; i < nbio; i++) - submit_bio(operation, biolist[i]); + submit_bio(biolist[i]); /* Let the I/Os go.. */ blk_finish_plug(&plug); - if (operation == READ) + if (operation == REQ_OP_READ) ring->st_rd_sect += preq.nr_sects; - else if (operation & WRITE) + else if (operation == REQ_OP_WRITE) ring->st_wr_sect += preq.nr_sects; return 0; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index fcc5b4e0aef2..da05d3f9bad2 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -196,6 +196,7 @@ struct blkfront_info unsigned int nr_ring_pages; struct request_queue *rq; unsigned int feature_flush; + unsigned int feature_fua; unsigned int feature_discard:1; unsigned int feature_secdiscard:1; unsigned int discard_granularity; @@ -746,7 +747,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri * The indirect operation can only be a BLKIF_OP_READ or * BLKIF_OP_WRITE */ - BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA)); + BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA); ring_req->operation = BLKIF_OP_INDIRECT; ring_req->u.indirect.indirect_op = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; @@ -758,7 +759,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri ring_req->u.rw.handle = info->handle; ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; - if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) { + if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) { /* * Ideally we can do an unordered flush-to-disk. * In case the backend onlysupports barriers, use that. @@ -766,19 +767,14 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri * implement it the same way. (It's also a FLUSH+FUA, * since it is guaranteed ordered WRT previous writes.) 
*/ - switch (info->feature_flush & - ((REQ_FLUSH|REQ_FUA))) { - case REQ_FLUSH|REQ_FUA: + if (info->feature_flush && info->feature_fua) ring_req->operation = BLKIF_OP_WRITE_BARRIER; - break; - case REQ_FLUSH: + else if (info->feature_flush) ring_req->operation = BLKIF_OP_FLUSH_DISKCACHE; - break; - default: + else ring_req->operation = 0; - } } ring_req->u.rw.nr_segments = num_grant; if (unlikely(require_extra_req)) { @@ -847,7 +843,8 @@ static int blkif_queue_request(struct request *req, struct blkfront_ring_info *r if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED)) return 1; - if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) + if (unlikely(req_op(req) == REQ_OP_DISCARD || + req->cmd_flags & REQ_SECURE)) return blkif_queue_discard_req(req, rinfo); else return blkif_queue_rw_req(req, rinfo); @@ -867,10 +864,10 @@ static inline bool blkif_request_flush_invalid(struct request *req, struct blkfront_info *info) { return ((req->cmd_type != REQ_TYPE_FS) || - ((req->cmd_flags & REQ_FLUSH) && - !(info->feature_flush & REQ_FLUSH)) || + ((req_op(req) == REQ_OP_FLUSH) && + !info->feature_flush) || ((req->cmd_flags & REQ_FUA) && - !(info->feature_flush & REQ_FUA))); + !info->feature_fua)); } static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx, @@ -981,24 +978,22 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, return 0; } -static const char *flush_info(unsigned int feature_flush) +static const char *flush_info(struct blkfront_info *info) { - switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) { - case REQ_FLUSH|REQ_FUA: + if (info->feature_flush && info->feature_fua) return "barrier: enabled;"; - case REQ_FLUSH: + else if (info->feature_flush) return "flush diskcache: enabled;"; - default: + else return "barrier or flush: disabled;"; - } } static void xlvbd_flush(struct blkfront_info *info) { - blk_queue_write_cache(info->rq, info->feature_flush & REQ_FLUSH, - info->feature_flush & REQ_FUA); + blk_queue_write_cache(info->rq, info->feature_flush ? true : false, + info->feature_fua ? true : false); pr_info("blkfront: %s: %s %s %s %s %s\n", - info->gd->disk_name, flush_info(info->feature_flush), + info->gd->disk_name, flush_info(info), "persistent grants:", info->feature_persistent ? "enabled;" : "disabled;", "indirect descriptors:", info->max_indirect_segments ? "enabled;" : "disabled;"); @@ -1617,6 +1612,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) if (unlikely(error)) { if (error == -EOPNOTSUPP) error = 0; + info->feature_fua = 0; info->feature_flush = 0; xlvbd_flush(info); } @@ -2064,7 +2060,7 @@ static int blkif_recover(struct blkfront_info *info) bio_trim(cloned_bio, offset, size); cloned_bio->bi_private = split_bio; cloned_bio->bi_end_io = split_bio_end; - submit_bio(cloned_bio->bi_rw, cloned_bio); + submit_bio(cloned_bio); } /* * Now we have to wait for all those smaller bios to @@ -2073,7 +2069,7 @@ static int blkif_recover(struct blkfront_info *info) continue; } /* We don't need to split this bio */ - submit_bio(bio->bi_rw, bio); + submit_bio(bio); } return 0; @@ -2108,8 +2104,10 @@ static int blkfront_resume(struct xenbus_device *dev) /* * Get the bios in the request so we can re-queue them. 
*/ - if (shadow[j].request->cmd_flags & - (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) { + if (req_op(shadow[i].request) == REQ_OP_FLUSH || + req_op(shadow[i].request) == REQ_OP_DISCARD || + shadow[j].request->cmd_flags & (REQ_FUA | REQ_SECURE)) { + /* * Flush operations don't contain bios, so * we need to requeue the whole request @@ -2298,6 +2296,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) unsigned int indirect_segments; info->feature_flush = 0; + info->feature_fua = 0; err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%d", &barrier, @@ -2310,8 +2309,11 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) * * If there are barriers, then we use flush. */ - if (!err && barrier) - info->feature_flush = REQ_FLUSH | REQ_FUA; + if (!err && barrier) { + info->feature_flush = 1; + info->feature_fua = 1; + } + /* * And if there is "feature-flush-cache" use that above * barriers. @@ -2320,8 +2322,10 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) "feature-flush-cache", "%d", &flush, NULL); - if (!err && flush) - info->feature_flush = REQ_FLUSH; + if (!err && flush) { + info->feature_flush = 1; + info->feature_fua = 0; + } err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-discard", "%d", &discard, diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 8fcad8b761f1..e5e5d19f2172 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -874,7 +874,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) offset = (bio->bi_iter.bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; - if (unlikely(bio->bi_rw & REQ_DISCARD)) { + if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) { zram_bio_discard(zram, index, offset, bio); bio_endio(bio); return; diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c index 474173eb31bb..5887a7a09e37 100644 --- a/drivers/ide/ide-cd_ioctl.c +++ b/drivers/ide/ide-cd_ioctl.c @@ -459,9 +459,6 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi, layer. the packet must be complete, as we do not touch it at all. 
*/ - if (cgc->data_direction == CGC_DATA_WRITE) - flags |= REQ_WRITE; - if (cgc->sense) memset(cgc->sense, 0, sizeof(struct request_sense)); diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 05dbcce70b0e..e378ef70ed63 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -431,7 +431,7 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq) ide_drive_t *drive = q->queuedata; struct ide_cmd *cmd; - if (!(rq->cmd_flags & REQ_FLUSH)) + if (req_op(rq) != REQ_OP_FLUSH) return BLKPREP_OK; if (rq->special) { diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 2fb5350c5410..f079d8d1d856 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -206,7 +206,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive, memcpy(rq->cmd, pc->c, 12); pc->rq = rq; - if (rq->cmd_flags & REQ_WRITE) + if (cmd == WRITE) pc->flags |= PC_FLAG_WRITING; pc->flags |= PC_FLAG_DMA_OK; diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 2103e97a974f..de86d72dcdf0 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -342,7 +342,7 @@ try: /* Perform read to do GC */ bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr); - bio->bi_rw = READ; + bio_set_op_attrs(bio, REQ_OP_READ, 0); bio->bi_private = &wait; bio->bi_end_io = rrpc_end_sync_bio; @@ -364,7 +364,7 @@ try: reinit_completion(&wait); bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr); - bio->bi_rw = WRITE; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio->bi_private = &wait; bio->bi_end_io = rrpc_end_sync_bio; @@ -908,7 +908,7 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio) struct nvm_rq *rqd; int err; - if (bio->bi_rw & REQ_DISCARD) { + if (bio_op(bio) == REQ_OP_DISCARD) { rrpc_discard(rrpc, bio); return BLK_QC_T_NONE; } diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index eab505ee0027..76f7534d1dd1 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -294,10 +294,10 @@ static void bch_btree_node_read(struct btree *b) closure_init_stack(&cl); bio = bch_bbio_alloc(b->c); - bio->bi_rw = REQ_META|READ_SYNC; bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; bio->bi_end_io = btree_node_read_endio; bio->bi_private = &cl; + bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC); bch_bio_map(bio, b->keys.set[0].data); @@ -396,8 +396,8 @@ static void do_btree_node_write(struct btree *b) b->bio->bi_end_io = btree_node_write_endio; b->bio->bi_private = cl; - b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); + bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA); bch_bio_map(b->bio, i); /* diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 8b1f1d5c1819..c28df164701e 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -52,9 +52,10 @@ void bch_btree_verify(struct btree *b) bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev; bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9; + bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC); bch_bio_map(bio, sorted); - submit_bio_wait(REQ_META|READ_SYNC, bio); + submit_bio_wait(bio); bch_bbio_free(bio, b->c); memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9); @@ -113,11 +114,12 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) check = bio_clone(bio, GFP_NOIO); if (!check) return; + bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC); if (bio_alloc_pages(check, GFP_NOIO)) goto out_put; - 
submit_bio_wait(READ_SYNC, check); + submit_bio_wait(check); bio_for_each_segment(bv, bio, iter) { void *p1 = kmap_atomic(bv.bv_page); diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 86a0bb87124e..fd885cc2afad 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -111,7 +111,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, struct bbio *b = container_of(bio, struct bbio, bio); struct cache *ca = PTR_CACHE(c, &b->key, 0); - unsigned threshold = bio->bi_rw & REQ_WRITE + unsigned threshold = op_is_write(bio_op(bio)) ? c->congested_write_threshold_us : c->congested_read_threshold_us; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 29eba7219b01..6925023e12d4 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -54,11 +54,11 @@ reread: left = ca->sb.bucket_size - offset; bio_reset(bio); bio->bi_iter.bi_sector = bucket + offset; bio->bi_bdev = ca->bdev; - bio->bi_rw = READ; bio->bi_iter.bi_size = len << 9; bio->bi_end_io = journal_read_endio; bio->bi_private = &cl; + bio_set_op_attrs(bio, REQ_OP_READ, 0); bch_bio_map(bio, data); closure_bio_submit(bio, &cl); @@ -418,7 +418,7 @@ static void journal_discard_work(struct work_struct *work) struct journal_device *ja = container_of(work, struct journal_device, discard_work); - submit_bio(0, &ja->discard_bio); + submit_bio(&ja->discard_bio); } static void do_journal_discard(struct cache *ca) @@ -449,10 +449,10 @@ static void do_journal_discard(struct cache *ca) atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); bio_init(bio); + bio_set_op_attrs(bio, REQ_OP_DISCARD, 0); bio->bi_iter.bi_sector = bucket_to_sector(ca->set, ca->sb.d[ja->discard_idx]); bio->bi_bdev = ca->bdev; - bio->bi_rw = REQ_WRITE|REQ_DISCARD; bio->bi_max_vecs = 1; bio->bi_io_vec = bio->bi_inline_vecs; bio->bi_iter.bi_size = bucket_bytes(ca); @@ -626,11 +626,12 @@ static void journal_write_unlocked(struct closure *cl) bio_reset(bio); bio->bi_iter.bi_sector = PTR_OFFSET(k, i); bio->bi_bdev = ca->bdev; - bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA; bio->bi_iter.bi_size = sectors << 9; bio->bi_end_io = journal_write_endio; bio->bi_private = w; + bio_set_op_attrs(bio, REQ_OP_WRITE, + REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA); bch_bio_map(bio, w->data); trace_bcache_journal_write(bio); diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index b929fc944e9c..1881319f2298 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -163,7 +163,7 @@ static void read_moving(struct cache_set *c) moving_init(io); bio = &io->bio.bio; - bio->bi_rw = READ; + bio_set_op_attrs(bio, REQ_OP_READ, 0); bio->bi_end_io = read_moving_endio; if (bio_alloc_pages(bio, GFP_KERNEL)) diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 25fa8445bb24..69f16f43f8ab 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -205,10 +205,10 @@ static void bch_data_insert_start(struct closure *cl) return bch_data_invalidate(cl); /* - * Journal writes are marked REQ_FLUSH; if the original write was a + * Journal writes are marked REQ_PREFLUSH; if the original write was a * flush, it'll wait on the journal write. 
*/ - bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA); + bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA); do { unsigned i; @@ -253,7 +253,7 @@ static void bch_data_insert_start(struct closure *cl) trace_bcache_cache_insert(k); bch_keylist_push(&op->insert_keys); - n->bi_rw |= REQ_WRITE; + bio_set_op_attrs(n, REQ_OP_WRITE, 0); bch_submit_bbio(n, op->c, k, 0); } while (n != bio); @@ -378,12 +378,12 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || c->gc_stats.in_use > CUTOFF_CACHE_ADD || - (bio->bi_rw & REQ_DISCARD)) + (bio_op(bio) == REQ_OP_DISCARD)) goto skip; if (mode == CACHE_MODE_NONE || (mode == CACHE_MODE_WRITEAROUND && - (bio->bi_rw & REQ_WRITE))) + op_is_write(bio_op(bio)))) goto skip; if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || @@ -404,7 +404,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) if (!congested && mode == CACHE_MODE_WRITEBACK && - (bio->bi_rw & REQ_WRITE) && + op_is_write(bio_op(bio)) && (bio->bi_rw & REQ_SYNC)) goto rescale; @@ -657,7 +657,7 @@ static inline struct search *search_alloc(struct bio *bio, s->cache_miss = NULL; s->d = d; s->recoverable = 1; - s->write = (bio->bi_rw & REQ_WRITE) != 0; + s->write = op_is_write(bio_op(bio)); s->read_dirty_data = 0; s->start_time = jiffies; @@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio, s->iop.write_prio = 0; s->iop.error = 0; s->iop.flags = 0; - s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0; + s->iop.flush_journal = (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0; s->iop.wq = bcache_wq; return s; @@ -899,7 +899,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) * But check_overlapping drops dirty keys for which io hasn't started, * so we still want to call it. 
*/ - if (bio->bi_rw & REQ_DISCARD) + if (bio_op(bio) == REQ_OP_DISCARD) s->iop.bypass = true; if (should_writeback(dc, s->orig_bio, @@ -913,22 +913,22 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) s->iop.bio = s->orig_bio; bio_get(s->iop.bio); - if (!(bio->bi_rw & REQ_DISCARD) || + if ((bio_op(bio) != REQ_OP_DISCARD) || blk_queue_discard(bdev_get_queue(dc->bdev))) closure_bio_submit(bio, cl); } else if (s->iop.writeback) { bch_writeback_add(dc); s->iop.bio = bio; - if (bio->bi_rw & REQ_FLUSH) { + if (bio->bi_rw & REQ_PREFLUSH) { /* Also need to send a flush to the backing device */ struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, dc->disk.bio_split); - flush->bi_rw = WRITE_FLUSH; flush->bi_bdev = bio->bi_bdev; flush->bi_end_io = request_endio; flush->bi_private = cl; + bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH); closure_bio_submit(flush, cl); } @@ -992,7 +992,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, cached_dev_read(dc, s); } } else { - if ((bio->bi_rw & REQ_DISCARD) && + if ((bio_op(bio) == REQ_OP_DISCARD) && !blk_queue_discard(bdev_get_queue(dc->bdev))) bio_endio(bio); else @@ -1103,7 +1103,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q, &KEY(d->id, bio->bi_iter.bi_sector, 0), &KEY(d->id, bio_end_sector(bio), 0)); - s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0; + s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0; s->iop.writeback = true; s->iop.bio = bio; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index f5dbb4e884d8..c944daf75dd0 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -212,8 +212,8 @@ static void __write_super(struct cache_sb *sb, struct bio *bio) unsigned i; bio->bi_iter.bi_sector = SB_SECTOR; - bio->bi_rw = REQ_SYNC|REQ_META; bio->bi_iter.bi_size = SB_SIZE; + bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META); bch_bio_map(bio, NULL); out->offset = cpu_to_le64(sb->offset); @@ -238,7 +238,7 @@ static void __write_super(struct cache_sb *sb, struct bio *bio) pr_debug("ver %llu, flags %llu, seq %llu", sb->version, sb->flags, sb->seq); - submit_bio(REQ_WRITE, bio); + submit_bio(bio); } static void bch_write_bdev_super_unlock(struct closure *cl) @@ -333,7 +333,7 @@ static void uuid_io_unlock(struct closure *cl) up(&c->uuid_write_mutex); } -static void uuid_io(struct cache_set *c, unsigned long rw, +static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, struct bkey *k, struct closure *parent) { struct closure *cl = &c->uuid_write; @@ -348,21 +348,22 @@ static void uuid_io(struct cache_set *c, unsigned long rw, for (i = 0; i < KEY_PTRS(k); i++) { struct bio *bio = bch_bbio_alloc(c); - bio->bi_rw = REQ_SYNC|REQ_META|rw; + bio->bi_rw = REQ_SYNC|REQ_META|op_flags; bio->bi_iter.bi_size = KEY_SIZE(k) << 9; bio->bi_end_io = uuid_endio; bio->bi_private = cl; + bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); bch_bio_map(bio, c->uuids); bch_submit_bbio(bio, c, k, i); - if (!(rw & WRITE)) + if (op != REQ_OP_WRITE) break; } bch_extent_to_text(buf, sizeof(buf), k); - pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf); + pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? 
"wrote" : "read", buf); for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) if (!bch_is_zero(u->uuid, 16)) @@ -381,7 +382,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) return "bad uuid pointer"; bkey_copy(&c->uuid_bucket, k); - uuid_io(c, READ_SYNC, k, cl); + uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl); if (j->version < BCACHE_JSET_VERSION_UUIDv1) { struct uuid_entry_v0 *u0 = (void *) c->uuids; @@ -426,7 +427,7 @@ static int __uuid_write(struct cache_set *c) return 1; SET_KEY_SIZE(&k.key, c->sb.bucket_size); - uuid_io(c, REQ_WRITE, &k.key, &cl); + uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl); closure_sync(&cl); bkey_copy(&c->uuid_bucket, &k.key); @@ -498,7 +499,8 @@ static void prio_endio(struct bio *bio) closure_put(&ca->prio); } -static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) +static void prio_io(struct cache *ca, uint64_t bucket, int op, + unsigned long op_flags) { struct closure *cl = &ca->prio; struct bio *bio = bch_bbio_alloc(ca->set); @@ -507,11 +509,11 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; bio->bi_bdev = ca->bdev; - bio->bi_rw = REQ_SYNC|REQ_META|rw; bio->bi_iter.bi_size = bucket_bytes(ca); bio->bi_end_io = prio_endio; bio->bi_private = ca; + bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); bch_bio_map(bio, ca->disk_buckets); closure_bio_submit(bio, &ca->prio); @@ -557,7 +559,7 @@ void bch_prio_write(struct cache *ca) BUG_ON(bucket == -1); mutex_unlock(&ca->set->bucket_lock); - prio_io(ca, bucket, REQ_WRITE); + prio_io(ca, bucket, REQ_OP_WRITE, 0); mutex_lock(&ca->set->bucket_lock); ca->prio_buckets[i] = bucket; @@ -599,7 +601,7 @@ static void prio_read(struct cache *ca, uint64_t bucket) ca->prio_last_buckets[bucket_nr] = bucket; bucket_nr++; - prio_io(ca, bucket, READ_SYNC); + prio_io(ca, bucket, REQ_OP_READ, READ_SYNC); if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) pr_warn("bad csum reading priorities"); diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 60123677b382..d9fd2a62e5f6 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -182,7 +182,7 @@ static void write_dirty(struct closure *cl) struct keybuf_key *w = io->bio.bi_private; dirty_init(w); - io->bio.bi_rw = WRITE; + bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0); io->bio.bi_iter.bi_sector = KEY_START(&w->key); io->bio.bi_bdev = io->dc->bdev; io->bio.bi_end_io = dirty_endio; @@ -251,10 +251,10 @@ static void read_dirty(struct cached_dev *dc) io->dc = dc; dirty_init(w); + bio_set_op_attrs(&io->bio, REQ_OP_READ, 0); io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); io->bio.bi_bdev = PTR_CACHE(dc->disk.c, &w->key, 0)->bdev; - io->bio.bi_rw = READ; io->bio.bi_end_io = read_dirty_endio; if (bio_alloc_pages(&io->bio, GFP_KERNEL)) diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index d8129ec93ebd..6fff794e0c72 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -162,7 +162,7 @@ static int read_sb_page(struct mddev *mddev, loff_t offset, if (sync_page_io(rdev, target, roundup(size, bdev_logical_block_size(rdev->bdev)), - page, READ, true)) { + page, REQ_OP_READ, 0, true)) { page->index = index; return 0; } @@ -297,7 +297,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait) atomic_inc(&bitmap->pending_writes); set_buffer_locked(bh); set_buffer_mapped(bh); - submit_bh(WRITE | REQ_SYNC, bh); + submit_bh(REQ_OP_WRITE, REQ_SYNC, bh); bh = bh->b_this_page; } @@ -392,7 
+392,7 @@ static int read_page(struct file *file, unsigned long index, atomic_inc(&bitmap->pending_writes); set_buffer_locked(bh); set_buffer_mapped(bh); - submit_bh(READ, bh); + submit_bh(REQ_OP_READ, 0, bh); } block++; bh = bh->b_this_page; diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index cd77216beff1..6571c81465e1 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -574,7 +574,8 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block, { int r; struct dm_io_request io_req = { - .bi_rw = rw, + .bi_op = rw, + .bi_op_flags = 0, .notify.fn = dmio_complete, .notify.context = b, .client = b->c->dm_io, @@ -634,6 +635,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, * the dm_buffer's inline bio is local to bufio. */ b->bio.bi_private = end_io; + bio_set_op_attrs(&b->bio, rw, 0); /* * We assume that if len >= PAGE_SIZE ptr is page-aligned. @@ -660,7 +662,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, ptr += PAGE_SIZE; } while (len > 0); - submit_bio(rw, &b->bio); + submit_bio(&b->bio); } static void submit_io(struct dm_buffer *b, int rw, sector_t block, @@ -1326,7 +1328,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers); int dm_bufio_issue_flush(struct dm_bufio_client *c) { struct dm_io_request io_req = { - .bi_rw = WRITE_FLUSH, + .bi_op = REQ_OP_WRITE, + .bi_op_flags = WRITE_FLUSH, .mem.type = DM_IO_KMEM, .mem.ptr.addr = NULL, .client = c->dm_io, diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index ee0510f9a85e..718744db62df 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -788,7 +788,8 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) spin_lock_irqsave(&cache->lock, flags); if (cache->need_tick_bio && - !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) { + !(bio->bi_rw & (REQ_FUA | REQ_PREFLUSH)) && + bio_op(bio) != REQ_OP_DISCARD) { pb->tick = true; cache->need_tick_bio = false; } @@ -829,7 +830,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) static int bio_triggers_commit(struct cache *cache, struct bio *bio) { - return bio->bi_rw & (REQ_FLUSH | REQ_FUA); + return bio->bi_rw & (REQ_PREFLUSH | REQ_FUA); } /* @@ -851,7 +852,7 @@ static void inc_ds(struct cache *cache, struct bio *bio, static bool accountable_bio(struct cache *cache, struct bio *bio) { return ((bio->bi_bdev == cache->origin_dev->bdev) && - !(bio->bi_rw & REQ_DISCARD)); + bio_op(bio) != REQ_OP_DISCARD); } static void accounted_begin(struct cache *cache, struct bio *bio) @@ -1067,7 +1068,8 @@ static void dec_io_migrations(struct cache *cache) static bool discard_or_flush(struct bio *bio) { - return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD); + return bio_op(bio) == REQ_OP_DISCARD || + bio->bi_rw & (REQ_PREFLUSH | REQ_FUA); } static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell) @@ -1612,8 +1614,8 @@ static void process_flush_bio(struct cache *cache, struct bio *bio) remap_to_cache(cache, bio, 0); /* - * REQ_FLUSH is not directed at any particular block so we don't - * need to inc_ds(). REQ_FUA's are split into a write + REQ_FLUSH + * REQ_PREFLUSH is not directed at any particular block so we don't + * need to inc_ds(). REQ_FUA's are split into a write + REQ_PREFLUSH * by dm-core. 
*/ issue(cache, bio); @@ -1978,9 +1980,9 @@ static void process_deferred_bios(struct cache *cache) bio = bio_list_pop(&bios); - if (bio->bi_rw & REQ_FLUSH) + if (bio->bi_rw & REQ_PREFLUSH) process_flush_bio(cache, bio); - else if (bio->bi_rw & REQ_DISCARD) + else if (bio_op(bio) == REQ_OP_DISCARD) process_discard_bio(cache, &structs, bio); else process_bio(cache, &structs, bio); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 4f3cb3554944..96dd5d7e454a 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1136,7 +1136,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone) clone->bi_private = io; clone->bi_end_io = crypt_endio; clone->bi_bdev = cc->dev->bdev; - clone->bi_rw = io->base_bio->bi_rw; + bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_rw); } static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) @@ -1911,11 +1911,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) struct crypt_config *cc = ti->private; /* - * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues. - * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight - * - for REQ_DISCARD caller must use flush if IO ordering matters + * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues. + * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight + * - for REQ_OP_DISCARD caller must use flush if IO ordering matters */ - if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { + if (unlikely(bio->bi_rw & REQ_PREFLUSH || + bio_op(bio) == REQ_OP_DISCARD)) { bio->bi_bdev = cc->dev->bdev; if (bio_sectors(bio)) bio->bi_iter.bi_sector = cc->start + diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index 665bf3285618..2faf49d8f4d7 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1540,9 +1540,9 @@ static int era_map(struct dm_target *ti, struct bio *bio) remap_to_origin(era, bio); /* - * REQ_FLUSH bios carry no data, so we're not interested in them. + * REQ_PREFLUSH bios carry no data, so we're not interested in them. */ - if (!(bio->bi_rw & REQ_FLUSH) && + if (!(bio->bi_rw & REQ_PREFLUSH) && (bio_data_dir(bio) == WRITE) && !metadata_current_marked(era->md, block)) { defer_bio(era, bio); diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index b7341de87015..29b99fb6a16a 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -266,7 +266,7 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value; DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " - "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n", + "(rw=%c bi_rw=%u bi_sector=%llu cur_bytes=%u)\n", bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw, (unsigned long long)bio->bi_iter.bi_sector, bio_bytes); diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 06d426eb5a30..0e225fd4a8d1 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -278,8 +278,9 @@ static void km_dp_init(struct dpages *dp, void *data) /*----------------------------------------------------------------- * IO routines that accept a list of pages. 
*---------------------------------------------------------------*/ -static void do_region(int rw, unsigned region, struct dm_io_region *where, - struct dpages *dp, struct io *io) +static void do_region(int op, int op_flags, unsigned region, + struct dm_io_region *where, struct dpages *dp, + struct io *io) { struct bio *bio; struct page *page; @@ -295,24 +296,25 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, /* * Reject unsupported discard and write same requests. */ - if (rw & REQ_DISCARD) + if (op == REQ_OP_DISCARD) special_cmd_max_sectors = q->limits.max_discard_sectors; - else if (rw & REQ_WRITE_SAME) + else if (op == REQ_OP_WRITE_SAME) special_cmd_max_sectors = q->limits.max_write_same_sectors; - if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) { + if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_SAME) && + special_cmd_max_sectors == 0) { dec_count(io, region, -EOPNOTSUPP); return; } /* - * where->count may be zero if rw holds a flush and we need to + * where->count may be zero if op holds a flush and we need to * send a zero-sized flush. */ do { /* * Allocate a suitably sized-bio. */ - if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME)) + if ((op == REQ_OP_DISCARD) || (op == REQ_OP_WRITE_SAME)) num_bvecs = 1; else num_bvecs = min_t(int, BIO_MAX_PAGES, @@ -322,13 +324,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, bio->bi_iter.bi_sector = where->sector + (where->count - remaining); bio->bi_bdev = where->bdev; bio->bi_end_io = endio; + bio_set_op_attrs(bio, op, op_flags); store_io_and_region_in_bio(bio, io, region); - if (rw & REQ_DISCARD) { + if (op == REQ_OP_DISCARD) { num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; remaining -= num_sectors; - } else if (rw & REQ_WRITE_SAME) { + } else if (op == REQ_OP_WRITE_SAME) { /* * WRITE SAME only uses a single page. 
*/ @@ -355,11 +358,11 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, } atomic_inc(&io->count); - submit_bio(rw, bio); + submit_bio(bio); } while (remaining); } -static void dispatch_io(int rw, unsigned int num_regions, +static void dispatch_io(int op, int op_flags, unsigned int num_regions, struct dm_io_region *where, struct dpages *dp, struct io *io, int sync) { @@ -369,7 +372,7 @@ static void dispatch_io(int rw, unsigned int num_regions, BUG_ON(num_regions > DM_IO_MAX_REGIONS); if (sync) - rw |= REQ_SYNC; + op_flags |= REQ_SYNC; /* * For multiple regions we need to be careful to rewind @@ -377,8 +380,8 @@ static void dispatch_io(int rw, unsigned int num_regions, */ for (i = 0; i < num_regions; i++) { *dp = old_pages; - if (where[i].count || (rw & REQ_FLUSH)) - do_region(rw, i, where + i, dp, io); + if (where[i].count || (op_flags & REQ_PREFLUSH)) + do_region(op, op_flags, i, where + i, dp, io); } /* @@ -402,13 +405,13 @@ static void sync_io_complete(unsigned long error, void *context) } static int sync_io(struct dm_io_client *client, unsigned int num_regions, - struct dm_io_region *where, int rw, struct dpages *dp, - unsigned long *error_bits) + struct dm_io_region *where, int op, int op_flags, + struct dpages *dp, unsigned long *error_bits) { struct io *io; struct sync_io sio; - if (num_regions > 1 && (rw & RW_MASK) != WRITE) { + if (num_regions > 1 && !op_is_write(op)) { WARN_ON(1); return -EIO; } @@ -425,7 +428,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, io->vma_invalidate_address = dp->vma_invalidate_address; io->vma_invalidate_size = dp->vma_invalidate_size; - dispatch_io(rw, num_regions, where, dp, io, 1); + dispatch_io(op, op_flags, num_regions, where, dp, io, 1); wait_for_completion_io(&sio.wait); @@ -436,12 +439,12 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, } static int async_io(struct dm_io_client *client, unsigned int num_regions, - struct dm_io_region *where, int rw, struct dpages *dp, - io_notify_fn fn, void *context) + struct dm_io_region *where, int op, int op_flags, + struct dpages *dp, io_notify_fn fn, void *context) { struct io *io; - if (num_regions > 1 && (rw & RW_MASK) != WRITE) { + if (num_regions > 1 && !op_is_write(op)) { WARN_ON(1); fn(1, context); return -EIO; @@ -457,7 +460,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, io->vma_invalidate_address = dp->vma_invalidate_address; io->vma_invalidate_size = dp->vma_invalidate_size; - dispatch_io(rw, num_regions, where, dp, io, 0); + dispatch_io(op, op_flags, num_regions, where, dp, io, 0); return 0; } @@ -480,7 +483,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp, case DM_IO_VMA: flush_kernel_vmap_range(io_req->mem.ptr.vma, size); - if ((io_req->bi_rw & RW_MASK) == READ) { + if (io_req->bi_op == REQ_OP_READ) { dp->vma_invalidate_address = io_req->mem.ptr.vma; dp->vma_invalidate_size = size; } @@ -518,10 +521,12 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions, if (!io_req->notify.fn) return sync_io(io_req->client, num_regions, where, - io_req->bi_rw, &dp, sync_error_bits); + io_req->bi_op, io_req->bi_op_flags, &dp, + sync_error_bits); - return async_io(io_req->client, num_regions, where, io_req->bi_rw, - &dp, io_req->notify.fn, io_req->notify.context); + return async_io(io_req->client, num_regions, where, io_req->bi_op, + io_req->bi_op_flags, &dp, io_req->notify.fn, + io_req->notify.context); } EXPORT_SYMBOL(dm_io); diff --git 
a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 1452ed9aacb4..9da1d54ac6cb 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -465,7 +465,7 @@ static void complete_io(unsigned long error, void *context) io_job_finish(kc->throttle); if (error) { - if (job->rw & WRITE) + if (op_is_write(job->rw)) job->write_err |= error; else job->read_err = 1; @@ -477,7 +477,7 @@ static void complete_io(unsigned long error, void *context) } } - if (job->rw & WRITE) + if (op_is_write(job->rw)) push(&kc->complete_jobs, job); else { @@ -496,7 +496,8 @@ static int run_io_job(struct kcopyd_job *job) { int r; struct dm_io_request io_req = { - .bi_rw = job->rw, + .bi_op = job->rw, + .bi_op_flags = 0, .mem.type = DM_IO_PAGE_LIST, .mem.ptr.pl = job->pages, .mem.offset = 0, @@ -550,7 +551,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, if (r < 0) { /* error this rogue job */ - if (job->rw & WRITE) + if (op_is_write(job->rw)) job->write_err = (unsigned long) -1L; else job->read_err = 1; @@ -734,7 +735,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, /* * Use WRITE SAME to optimize zeroing if all dests support it. */ - job->rw = WRITE | REQ_WRITE_SAME; + job->rw = REQ_OP_WRITE_SAME; for (i = 0; i < job->num_dests; i++) if (!bdev_write_same(job->dests[i].bdev)) { job->rw = WRITE; diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 608302e222af..b5dbf7a0515e 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -205,6 +205,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry, bio->bi_bdev = lc->logdev->bdev; bio->bi_end_io = log_end_io; bio->bi_private = lc; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); page = alloc_page(GFP_KERNEL); if (!page) { @@ -226,7 +227,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry, DMERR("Couldn't add page to the log block"); goto error_bio; } - submit_bio(WRITE, bio); + submit_bio(bio); return 0; error_bio: bio_put(bio); @@ -269,6 +270,7 @@ static int log_one_block(struct log_writes_c *lc, bio->bi_bdev = lc->logdev->bdev; bio->bi_end_io = log_end_io; bio->bi_private = lc; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); for (i = 0; i < block->vec_cnt; i++) { /* @@ -279,7 +281,7 @@ static int log_one_block(struct log_writes_c *lc, block->vecs[i].bv_len, 0); if (ret != block->vecs[i].bv_len) { atomic_inc(&lc->io_blocks); - submit_bio(WRITE, bio); + submit_bio(bio); bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i); if (!bio) { DMERR("Couldn't alloc log bio"); @@ -290,6 +292,7 @@ static int log_one_block(struct log_writes_c *lc, bio->bi_bdev = lc->logdev->bdev; bio->bi_end_io = log_end_io; bio->bi_private = lc; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); ret = bio_add_page(bio, block->vecs[i].bv_page, block->vecs[i].bv_len, 0); @@ -301,7 +304,7 @@ static int log_one_block(struct log_writes_c *lc, } sector += block->vecs[i].bv_len >> SECTOR_SHIFT; } - submit_bio(WRITE, bio); + submit_bio(bio); out: kfree(block->data); kfree(block); @@ -552,9 +555,9 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio) struct bio_vec bv; size_t alloc_size; int i = 0; - bool flush_bio = (bio->bi_rw & REQ_FLUSH); + bool flush_bio = (bio->bi_rw & REQ_PREFLUSH); bool fua_bio = (bio->bi_rw & REQ_FUA); - bool discard_bio = (bio->bi_rw & REQ_DISCARD); + bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD); pb->block = NULL; diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 627d19186d5a..4ca2d1df5b44 100644 --- a/drivers/md/dm-log.c +++ 
b/drivers/md/dm-log.c @@ -293,7 +293,7 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis static int rw_header(struct log_c *lc, int rw) { - lc->io_req.bi_rw = rw; + lc->io_req.bi_op = rw; return dm_io(&lc->io_req, 1, &lc->header_location, NULL); } @@ -306,7 +306,8 @@ static int flush_header(struct log_c *lc) .count = 0, }; - lc->io_req.bi_rw = WRITE_FLUSH; + lc->io_req.bi_op = REQ_OP_WRITE; + lc->io_req.bi_op_flags = WRITE_FLUSH; return dm_io(&lc->io_req, 1, &null_location, NULL); } diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 52532745a50f..8cbac62b1602 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -792,7 +792,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size) if (rdev->sb_loaded) return 0; - if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) { + if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, 1)) { DMERR("Failed to read superblock of device at position %d", rdev->raid_disk); md_error(rdev->mddev, rdev); @@ -1651,7 +1651,8 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs) for (i = 0; i < rs->md.raid_disks; i++) { r = &rs->dev[i].rdev; if (test_bit(Faulty, &r->flags) && r->sb_page && - sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) { + sync_page_io(r, 0, r->sb_size, r->sb_page, REQ_OP_READ, 0, + 1)) { DMINFO("Faulty %s device #%d has readable super block." " Attempting to revive it.", rs->raid_type->name, i); diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index b3ccf1e0d4f2..9f5f460c0e92 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -260,7 +260,8 @@ static int mirror_flush(struct dm_target *ti) struct dm_io_region io[ms->nr_mirrors]; struct mirror *m; struct dm_io_request io_req = { - .bi_rw = WRITE_FLUSH, + .bi_op = REQ_OP_WRITE, + .bi_op_flags = WRITE_FLUSH, .mem.type = DM_IO_KMEM, .mem.ptr.addr = NULL, .client = ms->io_client, @@ -541,7 +542,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio) { struct dm_io_region io; struct dm_io_request io_req = { - .bi_rw = READ, + .bi_op = REQ_OP_READ, + .bi_op_flags = 0, .mem.type = DM_IO_BIO, .mem.ptr.bio = bio, .notify.fn = read_callback, @@ -624,7 +626,7 @@ static void write_callback(unsigned long error, void *context) * If the bio is discard, return an error, but do not * degrade the array. 
*/ - if (bio->bi_rw & REQ_DISCARD) { + if (bio_op(bio) == REQ_OP_DISCARD) { bio->bi_error = -EOPNOTSUPP; bio_endio(bio); return; @@ -654,7 +656,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio) struct dm_io_region io[ms->nr_mirrors], *dest = io; struct mirror *m; struct dm_io_request io_req = { - .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), + .bi_op = REQ_OP_WRITE, + .bi_op_flags = bio->bi_rw & WRITE_FLUSH_FUA, .mem.type = DM_IO_BIO, .mem.ptr.bio = bio, .notify.fn = write_callback, @@ -662,8 +665,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio) .client = ms->io_client, }; - if (bio->bi_rw & REQ_DISCARD) { - io_req.bi_rw |= REQ_DISCARD; + if (bio_op(bio) == REQ_OP_DISCARD) { + io_req.bi_op = REQ_OP_DISCARD; io_req.mem.type = DM_IO_KMEM; io_req.mem.ptr.addr = NULL; } @@ -701,8 +704,8 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) bio_list_init(&requeue); while ((bio = bio_list_pop(writes))) { - if ((bio->bi_rw & REQ_FLUSH) || - (bio->bi_rw & REQ_DISCARD)) { + if ((bio->bi_rw & REQ_PREFLUSH) || + (bio_op(bio) == REQ_OP_DISCARD)) { bio_list_add(&sync, bio); continue; } @@ -1250,7 +1253,8 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) * We need to dec pending if this was a write. */ if (rw == WRITE) { - if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) + if (!(bio->bi_rw & REQ_PREFLUSH) && + bio_op(bio) != REQ_OP_DISCARD) dm_rh_dec(ms->rh, bio_record->write_region); return error; } diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 74cb7b991d41..b11813431f31 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c @@ -398,12 +398,12 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio) region_t region = dm_rh_bio_to_region(rh, bio); int recovering = 0; - if (bio->bi_rw & REQ_FLUSH) { + if (bio->bi_rw & REQ_PREFLUSH) { rh->flush_failure = 1; return; } - if (bio->bi_rw & REQ_DISCARD) + if (bio_op(bio) == REQ_OP_DISCARD) return; /* We must inform the log that the sync count has changed. */ @@ -526,7 +526,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) struct bio *bio; for (bio = bios->head; bio; bio = bio->bi_next) { - if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)) + if (bio->bi_rw & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD) continue; rh_inc(rh, dm_rh_bio_to_region(rh, bio)); } diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 4d3909393f2c..b8cf956b577b 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -226,8 +226,8 @@ static void do_metadata(struct work_struct *work) /* * Read or write a chunk aligned and sized block of data from a device. */ -static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, - int metadata) +static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op, + int op_flags, int metadata) { struct dm_io_region where = { .bdev = dm_snap_cow(ps->store->snap)->bdev, @@ -235,7 +235,8 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, .count = ps->store->chunk_size, }; struct dm_io_request io_req = { - .bi_rw = rw, + .bi_op = op, + .bi_op_flags = op_flags, .mem.type = DM_IO_VMA, .mem.ptr.vma = area, .client = ps->io_client, @@ -281,14 +282,14 @@ static void skip_metadata(struct pstore *ps) * Read or write a metadata area. Remembering to skip the first * chunk which holds the header. 
*/ -static int area_io(struct pstore *ps, int rw) +static int area_io(struct pstore *ps, int op, int op_flags) { int r; chunk_t chunk; chunk = area_location(ps, ps->current_area); - r = chunk_io(ps, ps->area, chunk, rw, 0); + r = chunk_io(ps, ps->area, chunk, op, op_flags, 0); if (r) return r; @@ -302,7 +303,8 @@ static void zero_memory_area(struct pstore *ps) static int zero_disk_area(struct pstore *ps, chunk_t area) { - return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0); + return chunk_io(ps, ps->zero_area, area_location(ps, area), + REQ_OP_WRITE, 0, 0); } static int read_header(struct pstore *ps, int *new_snapshot) @@ -334,7 +336,7 @@ static int read_header(struct pstore *ps, int *new_snapshot) if (r) return r; - r = chunk_io(ps, ps->header_area, 0, READ, 1); + r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 0, 1); if (r) goto bad; @@ -395,7 +397,7 @@ static int write_header(struct pstore *ps) dh->version = cpu_to_le32(ps->version); dh->chunk_size = cpu_to_le32(ps->store->chunk_size); - return chunk_io(ps, ps->header_area, 0, WRITE, 1); + return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 0, 1); } /* @@ -739,7 +741,7 @@ static void persistent_commit_exception(struct dm_exception_store *store, /* * Commit exceptions to disk. */ - if (ps->valid && area_io(ps, WRITE_FLUSH_FUA)) + if (ps->valid && area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA)) ps->valid = 0; /* @@ -779,7 +781,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store, return 0; ps->current_area--; - r = area_io(ps, READ); + r = area_io(ps, REQ_OP_READ, 0); if (r < 0) return r; ps->current_committed = ps->exceptions_per_area; @@ -816,7 +818,7 @@ static int persistent_commit_merge(struct dm_exception_store *store, for (i = 0; i < nr_merged; i++) clear_exception(ps, ps->current_committed - 1 - i); - r = area_io(ps, WRITE_FLUSH_FUA); + r = area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA); if (r < 0) return r; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 70bb0e8b62ce..69ab1ff5f5c9 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1680,7 +1680,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) init_tracked_chunk(bio); - if (bio->bi_rw & REQ_FLUSH) { + if (bio->bi_rw & REQ_PREFLUSH) { bio->bi_bdev = s->cow->bdev; return DM_MAPIO_REMAPPED; } @@ -1799,7 +1799,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) init_tracked_chunk(bio); - if (bio->bi_rw & REQ_FLUSH) { + if (bio->bi_rw & REQ_PREFLUSH) { if (!dm_bio_get_target_bio_nr(bio)) bio->bi_bdev = s->origin->bdev; else @@ -2285,7 +2285,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio) bio->bi_bdev = o->dev->bdev; - if (unlikely(bio->bi_rw & REQ_FLUSH)) + if (unlikely(bio->bi_rw & REQ_PREFLUSH)) return DM_MAPIO_REMAPPED; if (bio_rw(bio) != WRITE) diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 8289804ccd99..4fba26cd6bdb 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -514,11 +514,10 @@ static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared, } static void dm_stat_for_entry(struct dm_stat *s, size_t entry, - unsigned long bi_rw, sector_t len, + int idx, sector_t len, struct dm_stats_aux *stats_aux, bool end, unsigned long duration_jiffies) { - unsigned long idx = bi_rw & REQ_WRITE; struct dm_stat_shared *shared = &s->stat_shared[entry]; struct dm_stat_percpu *p; @@ -584,7 +583,7 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry, #endif } -static void __dm_stat_bio(struct dm_stat *s, unsigned long 
bi_rw, +static void __dm_stat_bio(struct dm_stat *s, int bi_rw, sector_t bi_sector, sector_t end_sector, bool end, unsigned long duration_jiffies, struct dm_stats_aux *stats_aux) @@ -645,8 +644,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, last = raw_cpu_ptr(stats->last); stats_aux->merged = (bi_sector == (ACCESS_ONCE(last->last_sector) && - ((bi_rw & (REQ_WRITE | REQ_DISCARD)) == - (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD))) + ((bi_rw == WRITE) == + (ACCESS_ONCE(last->last_rw) == WRITE)) )); ACCESS_ONCE(last->last_sector) = end_sector; ACCESS_ONCE(last->last_rw) = bi_rw; diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 797ddb900b06..48f1c01d7b9f 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -286,14 +286,14 @@ static int stripe_map(struct dm_target *ti, struct bio *bio) uint32_t stripe; unsigned target_bio_nr; - if (bio->bi_rw & REQ_FLUSH) { + if (bio->bi_rw & REQ_PREFLUSH) { target_bio_nr = dm_bio_get_target_bio_nr(bio); BUG_ON(target_bio_nr >= sc->stripes); bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev; return DM_MAPIO_REMAPPED; } - if (unlikely(bio->bi_rw & REQ_DISCARD) || - unlikely(bio->bi_rw & REQ_WRITE_SAME)) { + if (unlikely(bio_op(bio) == REQ_OP_DISCARD) || + unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) { target_bio_nr = dm_bio_get_target_bio_nr(bio); BUG_ON(target_bio_nr >= sc->stripes); return stripe_map_range(sc, bio, target_bio_nr); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index fc803d50f9f0..5f9e3d799d66 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -360,7 +360,7 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da sector_t len = block_to_sectors(tc->pool, data_e - data_b); return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, - GFP_NOWAIT, REQ_WRITE | REQ_DISCARD, &op->bio); + GFP_NOWAIT, 0, &op->bio); } static void end_discard(struct discard_op *op, int r) @@ -371,7 +371,8 @@ static void end_discard(struct discard_op *op, int r) * need to wait for the chain to complete. 
*/ bio_chain(op->bio, op->parent_bio); - submit_bio(REQ_WRITE | REQ_DISCARD, op->bio); + bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0); + submit_bio(op->bio); } blk_finish_plug(&op->plug); @@ -696,7 +697,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio) static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) { - return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && + return (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) && dm_thin_changed_this_transaction(tc->td); } @@ -704,7 +705,7 @@ static void inc_all_io_entry(struct pool *pool, struct bio *bio) { struct dm_thin_endio_hook *h; - if (bio->bi_rw & REQ_DISCARD) + if (bio_op(bio) == REQ_OP_DISCARD) return; h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); @@ -867,7 +868,8 @@ static void __inc_remap_and_issue_cell(void *context, struct bio *bio; while ((bio = bio_list_pop(&cell->bios))) { - if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) + if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) || + bio_op(bio) == REQ_OP_DISCARD) bio_list_add(&info->defer_bios, bio); else { inc_all_io_entry(info->tc->pool, bio); @@ -1639,7 +1641,8 @@ static void __remap_and_issue_shared_cell(void *context, while ((bio = bio_list_pop(&cell->bios))) { if ((bio_data_dir(bio) == WRITE) || - (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))) + (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) || + bio_op(bio) == REQ_OP_DISCARD)) bio_list_add(&info->defer_bios, bio); else { struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));; @@ -2028,7 +2031,7 @@ static void process_thin_deferred_bios(struct thin_c *tc) break; } - if (bio->bi_rw & REQ_DISCARD) + if (bio_op(bio) == REQ_OP_DISCARD) pool->process_discard(tc, bio); else pool->process_bio(tc, bio); @@ -2115,7 +2118,7 @@ static void process_thin_deferred_cells(struct thin_c *tc) return; } - if (cell->holder->bi_rw & REQ_DISCARD) + if (bio_op(cell->holder) == REQ_OP_DISCARD) pool->process_discard_cell(tc, cell); else pool->process_cell(tc, cell); @@ -2553,7 +2556,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_SUBMITTED; } - if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) { + if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) || + bio_op(bio) == REQ_OP_DISCARD) { thin_defer_bio_with_throttle(tc, bio); return DM_MAPIO_SUBMITTED; } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 1b2f96205361..aba7ed9abb3a 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -723,8 +723,9 @@ static void start_io_acct(struct dm_io *io) atomic_inc_return(&md->pending[rw])); if (unlikely(dm_stats_used(&md->stats))) - dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, - bio_sectors(bio), false, 0, &io->stats_aux); + dm_stats_account_io(&md->stats, bio_data_dir(bio), + bio->bi_iter.bi_sector, bio_sectors(bio), + false, 0, &io->stats_aux); } static void end_io_acct(struct dm_io *io) @@ -738,8 +739,9 @@ static void end_io_acct(struct dm_io *io) generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time); if (unlikely(dm_stats_used(&md->stats))) - dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, - bio_sectors(bio), true, duration, &io->stats_aux); + dm_stats_account_io(&md->stats, bio_data_dir(bio), + bio->bi_iter.bi_sector, bio_sectors(bio), + true, duration, &io->stats_aux); /* * After this is decremented the bio must not be touched if it is @@ -1001,12 +1003,12 @@ static void dec_pending(struct dm_io *io, int error) if (io_error == DM_ENDIO_REQUEUE) return; - if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) { + if 
((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) { /* * Preflush done for flush with data, reissue - * without REQ_FLUSH. + * without REQ_PREFLUSH. */ - bio->bi_rw &= ~REQ_FLUSH; + bio->bi_rw &= ~REQ_PREFLUSH; queue_io(md, bio); } else { /* done with normal IO or empty flush */ @@ -1051,7 +1053,7 @@ static void clone_endio(struct bio *bio) } } - if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) && + if (unlikely(r == -EREMOTEIO && (bio_op(bio) == REQ_OP_WRITE_SAME) && !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)) disable_write_same(md); @@ -1121,9 +1123,9 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig) if (unlikely(dm_stats_used(&md->stats))) { struct dm_rq_target_io *tio = tio_from_request(orig); tio->duration_jiffies = jiffies - tio->duration_jiffies; - dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig), - tio->n_sectors, true, tio->duration_jiffies, - &tio->stats_aux); + dm_stats_account_io(&md->stats, rq_data_dir(orig), + blk_rq_pos(orig), tio->n_sectors, true, + tio->duration_jiffies, &tio->stats_aux); } } @@ -1320,7 +1322,7 @@ static void dm_done(struct request *clone, int error, bool mapped) r = rq_end_io(tio->ti, clone, error, &tio->info); } - if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) && + if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) && !clone->q->limits.max_write_same_sectors)) disable_write_same(tio->md); @@ -1475,7 +1477,7 @@ EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); /* * A target may call dm_accept_partial_bio only from the map routine. It is - * allowed for all bio types except REQ_FLUSH. + * allowed for all bio types except REQ_PREFLUSH. * * dm_accept_partial_bio informs the dm that the target only wants to process * additional n_sectors sectors of the bio and the rest of the data should be @@ -1505,7 +1507,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) { struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; - BUG_ON(bio->bi_rw & REQ_FLUSH); + BUG_ON(bio->bi_rw & REQ_PREFLUSH); BUG_ON(bi_size > *tio->len_ptr); BUG_ON(n_sectors > bi_size); *tio->len_ptr -= bi_size - n_sectors; @@ -1746,9 +1748,9 @@ static int __split_and_process_non_flush(struct clone_info *ci) unsigned len; int r; - if (unlikely(bio->bi_rw & REQ_DISCARD)) + if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) return __send_discard(ci); - else if (unlikely(bio->bi_rw & REQ_WRITE_SAME)) + else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) return __send_write_same(ci); ti = dm_table_find_target(ci->map, ci->sector); @@ -1793,7 +1795,7 @@ static void __split_and_process_bio(struct mapped_device *md, start_io_acct(ci.io); - if (bio->bi_rw & REQ_FLUSH) { + if (bio->bi_rw & REQ_PREFLUSH) { ci.bio = &ci.md->flush_bio; ci.sector_count = 0; error = __send_empty_flush(&ci); @@ -2082,8 +2084,9 @@ static void dm_start_request(struct mapped_device *md, struct request *orig) struct dm_rq_target_io *tio = tio_from_request(orig); tio->duration_jiffies = jiffies; tio->n_sectors = blk_rq_sectors(orig); - dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig), - tio->n_sectors, false, 0, &tio->stats_aux); + dm_stats_account_io(&md->stats, rq_data_dir(orig), + blk_rq_pos(orig), tio->n_sectors, false, 0, + &tio->stats_aux); } /* @@ -2168,7 +2171,7 @@ static void dm_request_fn(struct request_queue *q) /* always use block 0 to find the target for flushes for now */ pos = 0; - if (!(rq->cmd_flags & 
REQ_FLUSH)) + if (req_op(rq) != REQ_OP_FLUSH) pos = blk_rq_pos(rq); if ((dm_request_peeked_before_merge_deadline(md) && @@ -2412,7 +2415,7 @@ static struct mapped_device *alloc_dev(int minor) bio_init(&md->flush_bio); md->flush_bio.bi_bdev = md->bdev; - md->flush_bio.bi_rw = WRITE_FLUSH; + bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH); dm_stats_init(&md->stats); diff --git a/drivers/md/linear.c b/drivers/md/linear.c index b7fe7e9fc777..70ff888d25d0 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -221,7 +221,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) struct bio *split; sector_t start_sector, end_sector, data_offset; - if (unlikely(bio->bi_rw & REQ_FLUSH)) { + if (unlikely(bio->bi_rw & REQ_PREFLUSH)) { md_flush_request(mddev, bio); return; } @@ -252,7 +252,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) split->bi_iter.bi_sector = split->bi_iter.bi_sector - start_sector + data_offset; - if (unlikely((split->bi_rw & REQ_DISCARD) && + if (unlikely((bio_op(split) == REQ_OP_DISCARD) && !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { /* Just ignore it */ bio_endio(split); diff --git a/drivers/md/md.c b/drivers/md/md.c index 866825f10b4c..1f123f5a29da 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -394,8 +394,9 @@ static void submit_flushes(struct work_struct *ws) bi->bi_end_io = md_end_flush; bi->bi_private = rdev; bi->bi_bdev = rdev->bdev; + bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH); atomic_inc(&mddev->flush_pending); - submit_bio(WRITE_FLUSH, bi); + submit_bio(bi); rcu_read_lock(); rdev_dec_pending(rdev, mddev); } @@ -413,7 +414,7 @@ static void md_submit_flush_data(struct work_struct *ws) /* an empty barrier - all done */ bio_endio(bio); else { - bio->bi_rw &= ~REQ_FLUSH; + bio->bi_rw &= ~REQ_PREFLUSH; mddev->pers->make_request(mddev, bio); } @@ -742,9 +743,10 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, bio_add_page(bio, page, size, 0); bio->bi_private = rdev; bio->bi_end_io = super_written; + bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA); atomic_inc(&mddev->pending_writes); - submit_bio(WRITE_FLUSH_FUA, bio); + submit_bio(bio); } void md_super_wait(struct mddev *mddev) @@ -754,13 +756,14 @@ void md_super_wait(struct mddev *mddev) } int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, - struct page *page, int rw, bool metadata_op) + struct page *page, int op, int op_flags, bool metadata_op) { struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); int ret; bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? 
rdev->meta_bdev : rdev->bdev; + bio_set_op_attrs(bio, op, op_flags); if (metadata_op) bio->bi_iter.bi_sector = sector + rdev->sb_start; else if (rdev->mddev->reshape_position != MaxSector && @@ -770,7 +773,8 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, else bio->bi_iter.bi_sector = sector + rdev->data_offset; bio_add_page(bio, page, size, 0); - submit_bio_wait(rw, bio); + + submit_bio_wait(bio); ret = !bio->bi_error; bio_put(bio); @@ -785,7 +789,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size) if (rdev->sb_loaded) return 0; - if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true)) + if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) goto fail; rdev->sb_loaded = 1; return 0; @@ -1471,7 +1475,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ return -EINVAL; bb_sector = (long long)offset; if (!sync_page_io(rdev, bb_sector, sectors << 9, - rdev->bb_page, READ, true)) + rdev->bb_page, REQ_OP_READ, 0, true)) return -EIO; bbp = (u64 *)page_address(rdev->bb_page); rdev->badblocks.shift = sb->bblog_shift; diff --git a/drivers/md/md.h b/drivers/md/md.h index b5c4be73e6e4..b4f335245bd6 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -424,7 +424,7 @@ struct mddev { /* Generic flush handling. * The last to finish preflush schedules a worker to submit - * the rest of the request (without the REQ_FLUSH flag). + * the rest of the request (without the REQ_PREFLUSH flag). */ struct bio *flush_bio; atomic_t flush_pending; @@ -618,7 +618,8 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, sector_t sector, int size, struct page *page); extern void md_super_wait(struct mddev *mddev); extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, - struct page *page, int rw, bool metadata_op); + struct page *page, int op, int op_flags, + bool metadata_op); extern void md_do_sync(struct md_thread *thread); extern void md_new_event(struct mddev *mddev); extern int md_allow_write(struct mddev *mddev); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index dd483bb2e111..72ea98e89e57 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -111,7 +111,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio) struct multipath_bh * mp_bh; struct multipath_info *multipath; - if (unlikely(bio->bi_rw & REQ_FLUSH)) { + if (unlikely(bio->bi_rw & REQ_PREFLUSH)) { md_flush_request(mddev, bio); return; } diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 34783a3c8b3c..c3d439083212 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -458,7 +458,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) struct md_rdev *tmp_dev; struct bio *split; - if (unlikely(bio->bi_rw & REQ_FLUSH)) { + if (unlikely(bio->bi_rw & REQ_PREFLUSH)) { md_flush_request(mddev, bio); return; } @@ -488,7 +488,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) split->bi_iter.bi_sector = sector + zone->dev_start + tmp_dev->data_offset; - if (unlikely((split->bi_rw & REQ_DISCARD) && + if (unlikely((bio_op(split) == REQ_OP_DISCARD) && !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { /* Just ignore it */ bio_endio(split); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index c7c8cde0ab21..10e53cd6a995 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -759,7 +759,7 @@ static void flush_pending_writes(struct r1conf *conf) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; bio->bi_next 
= NULL; - if (unlikely((bio->bi_rw & REQ_DISCARD) && + if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ bio_endio(bio); @@ -1033,7 +1033,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; bio->bi_next = NULL; - if (unlikely((bio->bi_rw & REQ_DISCARD) && + if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ bio_endio(bio); @@ -1053,12 +1053,12 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio) int i, disks; struct bitmap *bitmap; unsigned long flags; + const int op = bio_op(bio); const int rw = bio_data_dir(bio); const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); - const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); - const unsigned long do_discard = (bio->bi_rw - & (REQ_DISCARD | REQ_SECURE)); - const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME); + const unsigned long do_flush_fua = (bio->bi_rw & + (REQ_PREFLUSH | REQ_FUA)); + const unsigned long do_sec = (bio->bi_rw & REQ_SECURE); struct md_rdev *blocked_rdev; struct blk_plug_cb *cb; struct raid1_plug_cb *plug = NULL; @@ -1166,7 +1166,7 @@ read_again: mirror->rdev->data_offset; read_bio->bi_bdev = mirror->rdev->bdev; read_bio->bi_end_io = raid1_end_read_request; - read_bio->bi_rw = READ | do_sync; + bio_set_op_attrs(read_bio, op, do_sync); read_bio->bi_private = r1_bio; if (max_sectors < r1_bio->sectors) { @@ -1376,8 +1376,7 @@ read_again: conf->mirrors[i].rdev->data_offset); mbio->bi_bdev = conf->mirrors[i].rdev->bdev; mbio->bi_end_io = raid1_end_write_request; - mbio->bi_rw = - WRITE | do_flush_fua | do_sync | do_discard | do_same; + bio_set_op_attrs(mbio, op, do_flush_fua | do_sync | do_sec); mbio->bi_private = r1_bio; atomic_inc(&r1_bio->remaining); @@ -1771,7 +1770,7 @@ static void end_sync_write(struct bio *bio) static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, int sectors, struct page *page, int rw) { - if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) + if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) /* success */ return 1; if (rw == WRITE) { @@ -1825,7 +1824,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) rdev = conf->mirrors[d].rdev; if (sync_page_io(rdev, sect, s<<9, bio->bi_io_vec[idx].bv_page, - READ, false)) { + REQ_OP_READ, 0, false)) { success = 1; break; } @@ -2030,7 +2029,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) continue; - wbio->bi_rw = WRITE; + bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); wbio->bi_end_io = end_sync_write; atomic_inc(&r1_bio->remaining); md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); @@ -2090,7 +2089,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, is_badblock(rdev, sect, s, &first_bad, &bad_sectors) == 0 && sync_page_io(rdev, sect, s<<9, - conf->tmppage, READ, false)) + conf->tmppage, REQ_OP_READ, 0, false)) success = 1; else { d++; @@ -2201,14 +2200,15 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); } - wbio->bi_rw = WRITE; + bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); wbio->bi_iter.bi_sector = r1_bio->sector; wbio->bi_iter.bi_size = r1_bio->sectors << 9; bio_trim(wbio, sector - r1_bio->sector, sectors); wbio->bi_iter.bi_sector += rdev->data_offset; wbio->bi_bdev = 
rdev->bdev; - if (submit_bio_wait(WRITE, wbio) < 0) + + if (submit_bio_wait(wbio) < 0) /* failure! */ ok = rdev_set_badblocks(rdev, sector, sectors, 0) @@ -2343,7 +2343,7 @@ read_more: bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset; bio->bi_bdev = rdev->bdev; bio->bi_end_io = raid1_end_read_request; - bio->bi_rw = READ | do_sync; + bio_set_op_attrs(bio, REQ_OP_READ, do_sync); bio->bi_private = r1_bio; if (max_sectors < r1_bio->sectors) { /* Drat - have to split this up more */ @@ -2571,7 +2571,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, if (i < conf->raid_disks) still_degraded = 1; } else if (!test_bit(In_sync, &rdev->flags)) { - bio->bi_rw = WRITE; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio->bi_end_io = end_sync_write; write_targets ++; } else { @@ -2598,7 +2598,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, if (disk < 0) disk = i; } - bio->bi_rw = READ; + bio_set_op_attrs(bio, REQ_OP_READ, 0); bio->bi_end_io = end_sync_read; read_targets++; } else if (!test_bit(WriteErrorSeen, &rdev->flags) && @@ -2610,7 +2610,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, * if we are doing resync or repair. Otherwise, leave * this device alone for this sync request. */ - bio->bi_rw = WRITE; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio->bi_end_io = end_sync_write; write_targets++; } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index c7de2a53e625..245640b50153 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -865,7 +865,7 @@ static void flush_pending_writes(struct r10conf *conf) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; bio->bi_next = NULL; - if (unlikely((bio->bi_rw & REQ_DISCARD) && + if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ bio_endio(bio); @@ -1041,7 +1041,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; bio->bi_next = NULL; - if (unlikely((bio->bi_rw & REQ_DISCARD) && + if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ bio_endio(bio); @@ -1058,12 +1058,11 @@ static void __make_request(struct mddev *mddev, struct bio *bio) struct r10bio *r10_bio; struct bio *read_bio; int i; + const int op = bio_op(bio); const int rw = bio_data_dir(bio); const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); const unsigned long do_fua = (bio->bi_rw & REQ_FUA); - const unsigned long do_discard = (bio->bi_rw - & (REQ_DISCARD | REQ_SECURE)); - const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME); + const unsigned long do_sec = (bio->bi_rw & REQ_SECURE); unsigned long flags; struct md_rdev *blocked_rdev; struct blk_plug_cb *cb; @@ -1156,7 +1155,7 @@ read_again: choose_data_offset(r10_bio, rdev); read_bio->bi_bdev = rdev->bdev; read_bio->bi_end_io = raid10_end_read_request; - read_bio->bi_rw = READ | do_sync; + bio_set_op_attrs(read_bio, op, do_sync); read_bio->bi_private = r10_bio; if (max_sectors < r10_bio->sectors) { @@ -1363,8 +1362,7 @@ retry_write: rdev)); mbio->bi_bdev = rdev->bdev; mbio->bi_end_io = raid10_end_write_request; - mbio->bi_rw = - WRITE | do_sync | do_fua | do_discard | do_same; + bio_set_op_attrs(mbio, op, do_sync | do_fua | do_sec); mbio->bi_private = r10_bio; atomic_inc(&r10_bio->remaining); @@ -1406,8 +1404,7 @@ retry_write: r10_bio, rdev)); mbio->bi_bdev = rdev->bdev; 
mbio->bi_end_io = raid10_end_write_request; - mbio->bi_rw = - WRITE | do_sync | do_fua | do_discard | do_same; + bio_set_op_attrs(mbio, op, do_sync | do_fua | do_sec); mbio->bi_private = r10_bio; atomic_inc(&r10_bio->remaining); @@ -1450,7 +1447,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio) struct bio *split; - if (unlikely(bio->bi_rw & REQ_FLUSH)) { + if (unlikely(bio->bi_rw & REQ_PREFLUSH)) { md_flush_request(mddev, bio); return; } @@ -1992,10 +1989,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) tbio->bi_vcnt = vcnt; tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; - tbio->bi_rw = WRITE; tbio->bi_private = r10_bio; tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; tbio->bi_end_io = end_sync_write; + bio_set_op_attrs(tbio, REQ_OP_WRITE, 0); bio_copy_data(tbio, fbio); @@ -2078,7 +2075,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio) addr, s << 9, bio->bi_io_vec[idx].bv_page, - READ, false); + REQ_OP_READ, 0, false); if (ok) { rdev = conf->mirrors[dw].rdev; addr = r10_bio->devs[1].addr + sect; @@ -2086,7 +2083,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio) addr, s << 9, bio->bi_io_vec[idx].bv_page, - WRITE, false); + REQ_OP_WRITE, 0, false); if (!ok) { set_bit(WriteErrorSeen, &rdev->flags); if (!test_and_set_bit(WantReplacement, @@ -2213,7 +2210,7 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags))) return -1; - if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) + if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) /* success */ return 1; if (rw == WRITE) { @@ -2299,7 +2296,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 r10_bio->devs[sl].addr + sect, s<<9, - conf->tmppage, READ, false); + conf->tmppage, + REQ_OP_READ, 0, false); rdev_dec_pending(rdev, mddev); rcu_read_lock(); if (success) @@ -2474,7 +2472,9 @@ static int narrow_write_error(struct r10bio *r10_bio, int i) choose_data_offset(r10_bio, rdev) + (sector - r10_bio->sector)); wbio->bi_bdev = rdev->bdev; - if (submit_bio_wait(WRITE, wbio) < 0) + bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); + + if (submit_bio_wait(wbio) < 0) /* Failure! 
*/ ok = rdev_set_badblocks(rdev, sector, sectors, 0) @@ -2548,7 +2548,7 @@ read_more: bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + choose_data_offset(r10_bio, rdev); bio->bi_bdev = rdev->bdev; - bio->bi_rw = READ | do_sync; + bio_set_op_attrs(bio, REQ_OP_READ, do_sync); bio->bi_private = r10_bio; bio->bi_end_io = raid10_end_read_request; if (max_sectors < r10_bio->sectors) { @@ -3038,7 +3038,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, biolist = bio; bio->bi_private = r10_bio; bio->bi_end_io = end_sync_read; - bio->bi_rw = READ; + bio_set_op_attrs(bio, REQ_OP_READ, 0); from_addr = r10_bio->devs[j].addr; bio->bi_iter.bi_sector = from_addr + rdev->data_offset; @@ -3064,7 +3064,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, biolist = bio; bio->bi_private = r10_bio; bio->bi_end_io = end_sync_write; - bio->bi_rw = WRITE; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio->bi_iter.bi_sector = to_addr + rdev->data_offset; bio->bi_bdev = rdev->bdev; @@ -3093,7 +3093,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, biolist = bio; bio->bi_private = r10_bio; bio->bi_end_io = end_sync_write; - bio->bi_rw = WRITE; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio->bi_iter.bi_sector = to_addr + rdev->data_offset; bio->bi_bdev = rdev->bdev; @@ -3213,7 +3213,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, biolist = bio; bio->bi_private = r10_bio; bio->bi_end_io = end_sync_read; - bio->bi_rw = READ; + bio_set_op_attrs(bio, REQ_OP_READ, 0); bio->bi_iter.bi_sector = sector + conf->mirrors[d].rdev->data_offset; bio->bi_bdev = conf->mirrors[d].rdev->bdev; @@ -3235,7 +3235,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, biolist = bio; bio->bi_private = r10_bio; bio->bi_end_io = end_sync_write; - bio->bi_rw = WRITE; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio->bi_iter.bi_sector = sector + conf->mirrors[d].replacement->data_offset; bio->bi_bdev = conf->mirrors[d].replacement->bdev; @@ -4320,7 +4320,7 @@ read_more: + rdev->data_offset); read_bio->bi_private = r10_bio; read_bio->bi_end_io = end_sync_read; - read_bio->bi_rw = READ; + bio_set_op_attrs(read_bio, REQ_OP_READ, 0); read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); read_bio->bi_error = 0; read_bio->bi_vcnt = 0; @@ -4354,7 +4354,7 @@ read_more: rdev2->new_data_offset; b->bi_private = r10_bio; b->bi_end_io = end_reshape_write; - b->bi_rw = WRITE; + bio_set_op_attrs(b, REQ_OP_WRITE, 0); b->bi_next = blist; blist = b; } @@ -4522,7 +4522,7 @@ static int handle_reshape_read_error(struct mddev *mddev, addr, s << 9, bvec[idx].bv_page, - READ, false); + REQ_OP_READ, 0, false); if (success) break; failed: diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index e889e2deb7b3..5504ce2bac06 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -254,14 +254,14 @@ static void r5l_submit_current_io(struct r5l_log *log) __r5l_set_io_unit_state(io, IO_UNIT_IO_START); spin_unlock_irqrestore(&log->io_list_lock, flags); - submit_bio(WRITE, io->current_bio); + submit_bio(io->current_bio); } static struct bio *r5l_bio_alloc(struct r5l_log *log) { struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs); - bio->bi_rw = WRITE; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio->bi_bdev = log->rdev->bdev; bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; @@ -373,7 +373,7 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page) 
io->current_bio = r5l_bio_alloc(log); bio_chain(io->current_bio, prev); - submit_bio(WRITE, prev); + submit_bio(prev); } if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) @@ -536,7 +536,7 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio) bio_endio(bio); return 0; } - bio->bi_rw &= ~REQ_FLUSH; + bio->bi_rw &= ~REQ_PREFLUSH; return -EAGAIN; } @@ -686,7 +686,8 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log) bio_reset(&log->flush_bio); log->flush_bio.bi_bdev = log->rdev->bdev; log->flush_bio.bi_end_io = r5l_log_flush_endio; - submit_bio(WRITE_FLUSH, &log->flush_bio); + bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH); + submit_bio(&log->flush_bio); } static void r5l_write_super(struct r5l_log *log, sector_t cp); @@ -881,7 +882,8 @@ static int r5l_read_meta_block(struct r5l_log *log, struct r5l_meta_block *mb; u32 crc, stored_crc; - if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false)) + if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, REQ_OP_READ, 0, + false)) return -EIO; mb = page_address(page); @@ -926,7 +928,8 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log, &disk_index, sh); sync_page_io(log->rdev, *log_offset, PAGE_SIZE, - sh->dev[disk_index].page, READ, false); + sh->dev[disk_index].page, REQ_OP_READ, 0, + false); sh->dev[disk_index].log_checksum = le32_to_cpu(payload->checksum[0]); set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); @@ -934,7 +937,8 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log, } else { disk_index = sh->pd_idx; sync_page_io(log->rdev, *log_offset, PAGE_SIZE, - sh->dev[disk_index].page, READ, false); + sh->dev[disk_index].page, REQ_OP_READ, 0, + false); sh->dev[disk_index].log_checksum = le32_to_cpu(payload->checksum[0]); set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); @@ -944,7 +948,7 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log, sync_page_io(log->rdev, r5l_ring_add(log, *log_offset, BLOCK_SECTORS), PAGE_SIZE, sh->dev[disk_index].page, - READ, false); + REQ_OP_READ, 0, false); sh->dev[disk_index].log_checksum = le32_to_cpu(payload->checksum[1]); set_bit(R5_Wantwrite, @@ -986,11 +990,13 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log, rdev = rcu_dereference(conf->disks[disk_index].rdev); if (rdev) sync_page_io(rdev, stripe_sect, PAGE_SIZE, - sh->dev[disk_index].page, WRITE, false); + sh->dev[disk_index].page, REQ_OP_WRITE, 0, + false); rrdev = rcu_dereference(conf->disks[disk_index].replacement); if (rrdev) sync_page_io(rrdev, stripe_sect, PAGE_SIZE, - sh->dev[disk_index].page, WRITE, false); + sh->dev[disk_index].page, REQ_OP_WRITE, 0, + false); } raid5_release_stripe(sh); return 0; @@ -1062,7 +1068,8 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); mb->checksum = cpu_to_le32(crc); - if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) { + if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, + WRITE_FUA, false)) { __free_page(page); return -EIO; } @@ -1137,7 +1144,7 @@ static int r5l_load_log(struct r5l_log *log) if (!page) return -ENOMEM; - if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) { + if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) { ret = -EIO; goto ioerr; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 8959e6dd31dd..7aacf5b55e15 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -806,7 +806,8 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct 
stripe_head *sh dd_idx = 0; while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) dd_idx++; - if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw) + if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw || + bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) goto unlock_out; if (head->batch_head) { @@ -891,29 +892,28 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) if (r5l_write_stripe(conf->log, sh) == 0) return; for (i = disks; i--; ) { - int rw; + int op, op_flags = 0; int replace_only = 0; struct bio *bi, *rbi; struct md_rdev *rdev, *rrdev = NULL; sh = head_sh; if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { + op = REQ_OP_WRITE; if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) - rw = WRITE_FUA; - else - rw = WRITE; + op_flags = WRITE_FUA; if (test_bit(R5_Discard, &sh->dev[i].flags)) - rw |= REQ_DISCARD; + op = REQ_OP_DISCARD; } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) - rw = READ; + op = REQ_OP_READ; else if (test_and_clear_bit(R5_WantReplace, &sh->dev[i].flags)) { - rw = WRITE; + op = REQ_OP_WRITE; replace_only = 1; } else continue; if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) - rw |= REQ_SYNC; + op_flags |= REQ_SYNC; again: bi = &sh->dev[i].req; @@ -927,7 +927,7 @@ again: rdev = rrdev; rrdev = NULL; } - if (rw & WRITE) { + if (op_is_write(op)) { if (replace_only) rdev = NULL; if (rdev == rrdev) @@ -953,7 +953,7 @@ again: * need to check for writes. We never accept write errors * on the replacement, so we don't to check rrdev. */ - while ((rw & WRITE) && rdev && + while (op_is_write(op) && rdev && test_bit(WriteErrorSeen, &rdev->flags)) { sector_t first_bad; int bad_sectors; @@ -995,13 +995,13 @@ again: bio_reset(bi); bi->bi_bdev = rdev->bdev; - bi->bi_rw = rw; - bi->bi_end_io = (rw & WRITE) + bio_set_op_attrs(bi, op, op_flags); + bi->bi_end_io = op_is_write(op) ? raid5_end_write_request : raid5_end_read_request; bi->bi_private = sh; - pr_debug("%s: for %llu schedule op %ld on disc %d\n", + pr_debug("%s: for %llu schedule op %d on disc %d\n", __func__, (unsigned long long)sh->sector, bi->bi_rw, i); atomic_inc(&sh->count); @@ -1027,7 +1027,7 @@ again: * If this is discard request, set bi_vcnt 0. We don't * want to confuse SCSI because SCSI will replace payload */ - if (rw & REQ_DISCARD) + if (op == REQ_OP_DISCARD) bi->bi_vcnt = 0; if (rrdev) set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); @@ -1047,12 +1047,12 @@ again: bio_reset(rbi); rbi->bi_bdev = rrdev->bdev; - rbi->bi_rw = rw; - BUG_ON(!(rw & WRITE)); + bio_set_op_attrs(rbi, op, op_flags); + BUG_ON(!op_is_write(op)); rbi->bi_end_io = raid5_end_write_request; rbi->bi_private = sh; - pr_debug("%s: for %llu schedule op %ld on " + pr_debug("%s: for %llu schedule op %d on " "replacement disc %d\n", __func__, (unsigned long long)sh->sector, rbi->bi_rw, i); @@ -1076,7 +1076,7 @@ again: * If this is discard request, set bi_vcnt 0. 
We don't * want to confuse SCSI because SCSI will replace payload */ - if (rw & REQ_DISCARD) + if (op == REQ_OP_DISCARD) rbi->bi_vcnt = 0; if (conf->mddev->gendisk) trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), @@ -1085,9 +1085,9 @@ again: generic_make_request(rbi); } if (!rdev && !rrdev) { - if (rw & WRITE) + if (op_is_write(op)) set_bit(STRIPE_DEGRADED, &sh->state); - pr_debug("skip op %ld on disc %d for sector %llu\n", + pr_debug("skip op %d on disc %d for sector %llu\n", bi->bi_rw, i, (unsigned long long)sh->sector); clear_bit(R5_LOCKED, &sh->dev[i].flags); set_bit(STRIPE_HANDLE, &sh->state); @@ -1623,7 +1623,7 @@ again: set_bit(R5_WantFUA, &dev->flags); if (wbi->bi_rw & REQ_SYNC) set_bit(R5_SyncIO, &dev->flags); - if (wbi->bi_rw & REQ_DISCARD) + if (bio_op(wbi) == REQ_OP_DISCARD) set_bit(R5_Discard, &dev->flags); else { tx = async_copy_data(1, wbi, &dev->page, @@ -5150,7 +5150,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) DEFINE_WAIT(w); bool do_prepare; - if (unlikely(bi->bi_rw & REQ_FLUSH)) { + if (unlikely(bi->bi_rw & REQ_PREFLUSH)) { int ret = r5l_handle_flush_request(conf->log, bi); if (ret == 0) @@ -5176,7 +5176,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) return; } - if (unlikely(bi->bi_rw & REQ_DISCARD)) { + if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) { make_discard_request(mddev, bi); return; } diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index c5472e3c9231..11ee4145983b 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1724,8 +1724,8 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) !IS_ALIGNED(blk_rq_sectors(next), 8)) break; - if (next->cmd_flags & REQ_DISCARD || - next->cmd_flags & REQ_FLUSH) + if (req_op(next) == REQ_OP_DISCARD || + req_op(next) == REQ_OP_FLUSH) break; if (rq_data_dir(cur) != rq_data_dir(next)) @@ -2150,7 +2150,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) struct mmc_card *card = md->queue.card; struct mmc_host *host = card->host; unsigned long flags; - unsigned int cmd_flags = req ? req->cmd_flags : 0; if (req && !mq->mqrq_prev->req) /* claim host only for the first request */ @@ -2166,7 +2165,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) } mq->flags &= ~MMC_QUEUE_NEW_REQUEST; - if (cmd_flags & REQ_DISCARD) { + if (req && req_op(req) == REQ_OP_DISCARD) { /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); @@ -2174,7 +2173,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) ret = mmc_blk_issue_secdiscard_rq(mq, req); else ret = mmc_blk_issue_discard_rq(mq, req); - } else if (cmd_flags & REQ_FLUSH) { + } else if (req && req_op(req) == REQ_OP_FLUSH) { /* complete ongoing async transfer before issuing flush */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); @@ -2190,7 +2189,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) out: if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || - (cmd_flags & MMC_REQ_SPECIAL_MASK)) + mmc_req_is_special(req)) /* * Release host when there are no more requests * and after special request(discard, flush) is done. 
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 6f4323c6d653..c2d5f6f35145 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -33,7 +33,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) /* * We only like normal block requests and discards. */ - if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) { + if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) { blk_dump_rq_flags(req, "MMC bad request"); return BLKPREP_KILL; } @@ -56,7 +56,6 @@ static int mmc_queue_thread(void *d) down(&mq->thread_sem); do { struct request *req = NULL; - unsigned int cmd_flags = 0; spin_lock_irq(q->queue_lock); set_current_state(TASK_INTERRUPTIBLE); @@ -66,7 +65,6 @@ static int mmc_queue_thread(void *d) if (req || mq->mqrq_prev->req) { set_current_state(TASK_RUNNING); - cmd_flags = req ? req->cmd_flags : 0; mq->issue_fn(mq, req); cond_resched(); if (mq->flags & MMC_QUEUE_NEW_REQUEST) { @@ -81,7 +79,7 @@ static int mmc_queue_thread(void *d) * has been finished. Do not assign it to previous * request. */ - if (cmd_flags & MMC_REQ_SPECIAL_MASK) + if (mmc_req_is_special(req)) mq->mqrq_cur->req = NULL; mq->mqrq_prev->brq.mrq.data = NULL; diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index 36cddab57d77..d62531124d54 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h @@ -1,7 +1,11 @@ #ifndef MMC_QUEUE_H #define MMC_QUEUE_H -#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH) +static inline bool mmc_req_is_special(struct request *req) +{ + return req && + (req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD); +} struct request; struct task_struct; diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 74ae24364a8d..78b3eb45faf6 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -87,14 +87,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, if (req->cmd_type != REQ_TYPE_FS) return -EIO; - if (req->cmd_flags & REQ_FLUSH) + if (req_op(req) == REQ_OP_FLUSH) return tr->flush(dev); if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(req->rq_disk)) return -EIO; - if (req->cmd_flags & REQ_DISCARD) + if (req_op(req) == REQ_OP_DISCARD) return tr->discard(dev, block, nsect); if (rq_data_dir(req) == READ) { diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 608fc4464574..53b701b2f73e 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -283,6 +283,7 @@ static int pmem_attach_disk(struct device *dev, blk_queue_max_hw_sectors(q, UINT_MAX); blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + queue_flag_set_unlocked(QUEUE_FLAG_DAX, q); q->queuedata = pmem; disk = alloc_disk_node(0, nid); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index d5fb55c0a9d9..1c5a032d490d 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -290,9 +290,9 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req, if (req->cmd_type == REQ_TYPE_DRV_PRIV) memcpy(cmd, req->cmd, sizeof(*cmd)); - else if (req->cmd_flags & REQ_FLUSH) + else if (req_op(req) == REQ_OP_FLUSH) nvme_setup_flush(ns, cmd); - else if (req->cmd_flags & REQ_DISCARD) + else if (req_op(req) == REQ_OP_DISCARD) ret = nvme_setup_discard(ns, req, cmd); else nvme_setup_rw(ns, req, cmd); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 1daa0482de0e..4d196d2d57da 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -177,7 +177,7 @@ static inline 
u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) static inline unsigned nvme_map_len(struct request *rq) { - if (rq->cmd_flags & REQ_DISCARD) + if (req_op(rq) == REQ_OP_DISCARD) return sizeof(struct nvme_dsm_range); else return blk_rq_bytes(rq); @@ -185,7 +185,7 @@ static inline unsigned nvme_map_len(struct request *rq) static inline void nvme_cleanup_cmd(struct request *req) { - if (req->cmd_flags & REQ_DISCARD) + if (req_op(req) == REQ_OP_DISCARD) kfree(req->completion_data); } diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index bed53c46dd90..093e9e18e7e7 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -618,6 +618,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->gd->driverfs_dev = &dev_info->dev; blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); + queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue); seg_byte_size = (dev_info->end - dev_info->start + 1); set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 3b11aad03752..daa4dc17f172 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -726,7 +726,7 @@ static int _osd_req_list_objects(struct osd_request *or, return PTR_ERR(bio); } - bio->bi_rw &= ~REQ_WRITE; + bio_set_op_attrs(bio, REQ_OP_READ, 0); or->in.bio = bio; or->in.total_bytes = bio->bi_iter.bi_size; return 0; @@ -824,7 +824,7 @@ void osd_req_write(struct osd_request *or, { _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len); WARN_ON(or->out.bio || or->out.total_bytes); - WARN_ON(0 == (bio->bi_rw & REQ_WRITE)); + WARN_ON(!op_is_write(bio_op(bio))); or->out.bio = bio; or->out.total_bytes = len; } @@ -839,7 +839,7 @@ int osd_req_write_kern(struct osd_request *or, if (IS_ERR(bio)) return PTR_ERR(bio); - bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */ + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); osd_req_write(or, obj, offset, bio, len); return 0; } @@ -875,7 +875,7 @@ void osd_req_read(struct osd_request *or, { _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); WARN_ON(or->in.bio || or->in.total_bytes); - WARN_ON(bio->bi_rw & REQ_WRITE); + WARN_ON(op_is_write(bio_op(bio))); or->in.bio = bio; or->in.total_bytes = len; } @@ -956,7 +956,7 @@ static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key) if (IS_ERR(bio)) return PTR_ERR(bio); - bio->bi_rw |= REQ_WRITE; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); /* integrity check the continuation before the bio is linked * with the other data segments since the continuation @@ -1077,7 +1077,7 @@ int osd_req_write_sg_kern(struct osd_request *or, if (IS_ERR(bio)) return PTR_ERR(bio); - bio->bi_rw |= REQ_WRITE; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); osd_req_write_sg(or, obj, bio, sglist, numentries); return 0; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 60bff78e9ead..0609d6802d93 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1012,7 +1012,8 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) } else if (rq_data_dir(rq) == READ) { SCpnt->cmnd[0] = READ_6; } else { - scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags); + scmd_printk(KERN_ERR, SCpnt, "Unknown command %llu,%llx\n", + req_op(rq), (unsigned long long) rq->cmd_flags); goto out; } @@ -1137,21 +1138,26 @@ static int sd_init_command(struct 
scsi_cmnd *cmd) { struct request *rq = cmd->request; - if (rq->cmd_flags & REQ_DISCARD) + switch (req_op(rq)) { + case REQ_OP_DISCARD: return sd_setup_discard_cmnd(cmd); - else if (rq->cmd_flags & REQ_WRITE_SAME) + case REQ_OP_WRITE_SAME: return sd_setup_write_same_cmnd(cmd); - else if (rq->cmd_flags & REQ_FLUSH) + case REQ_OP_FLUSH: return sd_setup_flush_cmnd(cmd); - else + case REQ_OP_READ: + case REQ_OP_WRITE: return sd_setup_read_write_cmnd(cmd); + default: + BUG(); + } } static void sd_uninit_command(struct scsi_cmnd *SCpnt) { struct request *rq = SCpnt->request; - if (rq->cmd_flags & REQ_DISCARD) + if (req_op(rq) == REQ_OP_DISCARD) __free_page(rq->completion_data); if (SCpnt->cmnd != rq->cmd) { @@ -1774,7 +1780,7 @@ static int sd_done(struct scsi_cmnd *SCpnt) unsigned char op = SCpnt->cmnd[0]; unsigned char unmap = SCpnt->cmnd[1] & 8; - if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) { + if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_SAME) { if (!result) { good_bytes = blk_rq_bytes(req); scsi_set_resid(SCpnt, 0); diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 7c4efb4417b0..22af12f8b8eb 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -312,7 +312,8 @@ static void iblock_bio_done(struct bio *bio) } static struct bio * -iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) +iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op, + int op_flags) { struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); struct bio *bio; @@ -334,18 +335,19 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) bio->bi_private = cmd; bio->bi_end_io = &iblock_bio_done; bio->bi_iter.bi_sector = lba; + bio_set_op_attrs(bio, op, op_flags); return bio; } -static void iblock_submit_bios(struct bio_list *list, int rw) +static void iblock_submit_bios(struct bio_list *list) { struct blk_plug plug; struct bio *bio; blk_start_plug(&plug); while ((bio = bio_list_pop(list))) - submit_bio(rw, bio); + submit_bio(bio); blk_finish_plug(&plug); } @@ -387,9 +389,10 @@ iblock_execute_sync_cache(struct se_cmd *cmd) bio = bio_alloc(GFP_KERNEL, 0); bio->bi_end_io = iblock_end_io_flush; bio->bi_bdev = ib_dev->ibd_bd; + bio->bi_rw = WRITE_FLUSH; if (!immed) bio->bi_private = cmd; - submit_bio(WRITE_FLUSH, bio); + submit_bio(bio); return 0; } @@ -478,7 +481,7 @@ iblock_execute_write_same(struct se_cmd *cmd) goto fail; cmd->priv = ibr; - bio = iblock_get_bio(cmd, block_lba, 1); + bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0); if (!bio) goto fail_free_ibr; @@ -491,7 +494,8 @@ iblock_execute_write_same(struct se_cmd *cmd) while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) != sg->length) { - bio = iblock_get_bio(cmd, block_lba, 1); + bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, + 0); if (!bio) goto fail_put_bios; @@ -504,7 +508,7 @@ iblock_execute_write_same(struct se_cmd *cmd) sectors -= 1; } - iblock_submit_bios(&list, WRITE); + iblock_submit_bios(&list); return 0; fail_put_bios: @@ -677,8 +681,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, struct scatterlist *sg; u32 sg_num = sgl_nents; unsigned bio_cnt; - int rw = 0; - int i; + int i, op, op_flags = 0; if (data_direction == DMA_TO_DEVICE) { struct iblock_dev *ib_dev = IBLOCK_DEV(dev); @@ -687,18 +690,15 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, * Force writethrough using WRITE_FUA if a volatile write cache * is not enabled, 
or if initiator set the Force Unit Access bit. */ + op = REQ_OP_WRITE; if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) { if (cmd->se_cmd_flags & SCF_FUA) - rw = WRITE_FUA; + op_flags = WRITE_FUA; else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) - rw = WRITE_FUA; - else - rw = WRITE; - } else { - rw = WRITE; + op_flags = WRITE_FUA; } } else { - rw = READ; + op = REQ_OP_READ; } ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); @@ -712,7 +712,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, return 0; } - bio = iblock_get_bio(cmd, block_lba, sgl_nents); + bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags); if (!bio) goto fail_free_ibr; @@ -732,11 +732,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) != sg->length) { if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) { - iblock_submit_bios(&list, rw); + iblock_submit_bios(&list); bio_cnt = 0; } - bio = iblock_get_bio(cmd, block_lba, sg_num); + bio = iblock_get_bio(cmd, block_lba, sg_num, op, + op_flags); if (!bio) goto fail_put_bios; @@ -756,7 +757,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, goto fail_put_bios; } - iblock_submit_bios(&list, rw); + iblock_submit_bios(&list); iblock_complete_cmd(cmd); return 0; diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index de18790eb21c..81564c87f24b 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -922,7 +922,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, goto fail; if (rw) - bio->bi_rw |= REQ_WRITE; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); pr_debug("PSCSI: Allocated bio: %p," " dir: %s nr_vecs: %d\n", bio, |
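Illustrative sketches follow; none of this code is from the series itself, and every mydrv_* name is invented. The mmc, mtd_blkdevs and nvme hunks above all make the same substitution: instead of testing bits in req->cmd_flags (REQ_FLUSH, REQ_DISCARD), the driver asks for the request's operation with req_op() and compares it against REQ_OP_* values. A minimal sketch of the resulting dispatch pattern for a hypothetical driver:

#include <linux/blkdev.h>

struct mydrv_dev;						/* hypothetical */
int mydrv_flush(struct mydrv_dev *dev);				/* hypothetical */
int mydrv_discard(struct mydrv_dev *dev, sector_t pos, unsigned int nr);
int mydrv_read_write(struct mydrv_dev *dev, struct request *req);

static int mydrv_do_request(struct mydrv_dev *dev, struct request *req)
{
	/* Only normal file-system requests are handled here. */
	if (req->cmd_type != REQ_TYPE_FS)
		return -EIO;

	/* One switch on the operation replaces scattered cmd_flags tests. */
	switch (req_op(req)) {
	case REQ_OP_FLUSH:
		return mydrv_flush(dev);
	case REQ_OP_DISCARD:
		return mydrv_discard(dev, blk_rq_pos(req), blk_rq_sectors(req));
	default:
		return mydrv_read_write(dev, req);
	}
}

The mmc_req_is_special() helper added in queue.h is the same idea reduced to a predicate: special requests are now identified by their operation, not by flag bits.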
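On the bio side, the osd_initiator.c hunks stop poking REQ_WRITE into bio->bi_rw directly: the operation and its modifier flags are set together with bio_set_op_attrs(), and the data direction is derived with op_is_write(bio_op(bio)). A sketch of both halves, again with hypothetical mydrv_ wrappers:

#include <linux/bio.h>

static void mydrv_mark_write(struct bio *bio)		/* hypothetical */
{
	/* Encode "plain write, no extra flags" on the bio itself. */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
}

static void mydrv_expect_read(struct bio *bio)		/* hypothetical */
{
	/* Direction checks now go through the op, not through REQ_WRITE. */
	WARN_ON(op_is_write(bio_op(bio)));
}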
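Because the operation now travels on the bio, the submit path loses its rw argument: target_core_iblock.c builds each bio with bio_set_op_attrs() in iblock_get_bio() and then calls plain submit_bio(bio). A condensed sketch of that allocate-and-submit shape, assuming a single-segment bio and a hypothetical driver:

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio *mydrv_alloc_bio(struct block_device *bdev, sector_t lba,
				   int op, int op_flags)	/* hypothetical */
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);

	if (!bio)
		return NULL;
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = lba;
	bio_set_op_attrs(bio, op, op_flags);	/* op + flags replace the old rw word */
	return bio;
}

static void mydrv_submit_list(struct bio_list *list)		/* hypothetical */
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(bio);		/* no rw argument any more */
	blk_finish_plug(&plug);
}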
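Finally, the iblock_execute_rw() hunk shows how the old combined rw word splits in two: op says what the I/O is (REQ_OP_READ or REQ_OP_WRITE) and op_flags carries modifiers such as WRITE_FUA when write-through is forced. A sketch of that selection, with the cache/FUA policy reduced to a hypothetical want_fua flag:

#include <linux/bio.h>
#include <linux/fs.h>

static void mydrv_set_rw_attrs(struct bio *bio, bool to_device, bool want_fua)
{
	int op = to_device ? REQ_OP_WRITE : REQ_OP_READ;
	int op_flags = 0;

	/* FUA (or a disabled volatile write cache) forces write-through. */
	if (to_device && want_fua)
		op_flags = WRITE_FUA;

	bio_set_op_attrs(bio, op, op_flags);
}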