author     Linus Torvalds <torvalds@linux-foundation.org>   2022-03-22 02:48:55 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-03-22 02:48:55 +0300
commit     616355cc818c6ddadc393fdfd4491f94458cb715 (patch)
tree       a96907b179ccbeb84cd1df5515cd10c90eb5bd59 /drivers
parent     b080cee72ef355669cbc52ff55dc513d37433600 (diff)
parent     8f9e7b65f833cb9a4b2e2f54a049d74df394d906 (diff)
download   linux-616355cc818c6ddadc393fdfd4491f94458cb715.tar.xz
Merge tag 'for-5.18/block-2022-03-18' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
- BFQ cleanups and fixes (Yu, Zhang, Yahu, Paolo)
- blk-rq-qos completion fix (Tejun)
- blk-cgroup merge fix (Tejun)
- Add offline error return value to distinguish it from an IO error on
the device (Song)
- IO stats fixes (Zhang, Christoph)
- blkcg refcount fixes (Ming, Yu)
- Fix for indefinite dispatch loop softlockup (Shin'ichiro)
- blk-mq hardware queue management improvements (Ming)
- sbitmap dead code removal (Ming, John)
- Plugging merge improvements (me)
- Show blk-crypto capabilities in sysfs (Eric)
- Multiple delayed queue run improvement (David)
- Block throttling fixes (Ming)
- Start deprecating auto module loading based on dev_t (Christoph)
- bio allocation improvements (Christoph, Chaitanya), sketched after this list
- Get rid of bio_devname (Christoph)
- bio clone improvements (Christoph)
- Block plugging improvements (Christoph)
- Get rid of genhd.h header (Christoph)
- Ensure drivers use appropriate flush helpers (Christoph)
- Refcounting improvements (Christoph), with the new ->free_disk hook sketched after this list
- Queue initialization and teardown improvements (Ming, Christoph)
- Misc fixes/improvements (Barry, Chaitanya, Colin, Dan, Jiapeng,
Lukas, Nian, Yang, Eric, Chengming)
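
The bio allocation and clone improvements called out above change the allocation interfaces so that the target block device and the operation/flags are passed at allocation time, rather than being patched in afterwards with bio_set_dev() and bio_set_op_attrs(); bio_clone_fast() callers move to bio_alloc_clone(). A minimal sketch distilled from the driver hunks in the diff below -- the submit_one_write()/clone_to_backing_dev() helpers and their arguments are placeholders, not code from this merge:

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Old style (before this merge) -- allocate, then attach device and op:
 *
 *	bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
 *	bio_set_dev(bio, bdev);
 *	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
 *
 * New style (this merge) -- the device and opf are allocator arguments:
 */
static void submit_one_write(struct block_device *bdev, struct bio_set *bs,
			     sector_t sector, bio_end_io_t *end_io)
{
	struct bio *bio = bio_alloc_bioset(bdev, 1, REQ_OP_WRITE | REQ_SYNC,
					   GFP_NOIO, bs);

	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = end_io;
	submit_bio(bio);
}

/*
 * Cloning follows the same pattern: bio_clone_fast(src, gfp, bs) plus a
 * separate bio_set_dev() becomes one bio_alloc_clone() call that takes the
 * target bdev up front.
 */
static struct bio *clone_to_backing_dev(struct block_device *bdev,
					struct bio *src, struct bio_set *bs)
{
	return bio_alloc_clone(bdev, src, GFP_NOIO, bs);
}

bio_init() and bio_reset() gain the same bdev/opf arguments, which is why the floppy, zram, bcache and pktcdvd hunks below drop their bio_set_dev()/bio_set_op_attrs() calls.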
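
The refcounting improvements and the sd/sr/virtio_blk conversions in the shortlog below rely on the new ->free_disk hook in block_device_operations: per-disk driver state is freed when the last gendisk reference is dropped, so drivers no longer need their own open()/release() refcounting (virtblk_open/virtblk_release are deleted in the diff below). A minimal sketch of the pattern; the mydrv_* names and structure are hypothetical:

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/slab.h>

struct mydrv_device {
	int index;		/* whatever per-disk state the driver keeps */
};

/* Called by the block layer once the final reference to the gendisk is put. */
static void mydrv_free_disk(struct gendisk *disk)
{
	struct mydrv_device *dev = disk->private_data;

	kfree(dev);
}

static const struct block_device_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.free_disk	= mydrv_free_disk,
};

Teardown then pairs del_gendisk() with a final put_disk(), deferring the actual free to ->free_disk(), as the virtio_blk hunk below shows.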
* tag 'for-5.18/block-2022-03-18' of git://git.kernel.dk/linux-block: (127 commits)
block: cancel all throttled bios in del_gendisk()
block: let blkcg_gq grab request queue's refcnt
block: avoid use-after-free on throttle data
block: limit request dispatch loop duration
block/bfq-iosched: Fix spelling mistake "tenative" -> "tentative"
sr: simplify the local variable initialization in sr_block_open()
block: don't merge across cgroup boundaries if blkcg is enabled
block: fix rq-qos breakage from skipping rq_qos_done_bio()
block: flush plug based on hardware and software queue order
block: ensure plug merging checks the correct queue at least once
block: move rq_qos_exit() into disk_release()
block: do more work in elevator_exit
block: move blk_exit_queue into disk_release
block: move q_usage_counter release into blk_queue_release
block: don't remove hctx debugfs dir from blk_mq_exit_queue
block: move blkcg initialization/destroy into disk allocation/release handler
sr: implement ->free_disk to simplify refcounting
sd: implement ->free_disk to simplify refcounting
sd: delay calling free_opal_dev
sd: call sd_zbc_release_disk before releasing the scsi_device reference
...
Diffstat (limited to 'drivers')
84 files changed, 433 insertions, 1026 deletions
diff --git a/drivers/base/class.c b/drivers/base/class.c index 7476f393df97..8feb85e186e3 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -16,7 +16,7 @@ #include <linux/kdev_t.h> #include <linux/err.h> #include <linux/slab.h> -#include <linux/genhd.h> +#include <linux/blkdev.h> #include <linux/mutex.h> #include "base.h" diff --git a/drivers/base/core.c b/drivers/base/core.c index 7bb957b11861..3d6430eb0c6a 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -21,7 +21,7 @@ #include <linux/notifier.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/genhd.h> +#include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/pm_runtime.h> #include <linux/netdevice.h> diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index f41063ac1aee..db5a03a0618e 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -17,7 +17,7 @@ #include <linux/syscalls.h> #include <linux/mount.h> #include <linux/device.h> -#include <linux/genhd.h> +#include <linux/blkdev.h> #include <linux/namei.h> #include <linux/fs.h> #include <linux/shmem_fs.h> diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 52484bcdedb9..8a91fcac6f82 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -12,7 +12,6 @@ #include <linux/ioctl.h> #include <linux/slab.h> #include <linux/ratelimit.h> -#include <linux/genhd.h> #include <linux/netdevice.h> #include <linux/mutex.h> #include <linux/export.h> diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 6af111f568e4..cc11f89a0928 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -10,7 +10,6 @@ #include <linux/blk-mq.h> #include <linux/skbuff.h> #include <linux/netdevice.h> -#include <linux/genhd.h> #include <linux/moduleparam.h> #include <linux/workqueue.h> #include <linux/kthread.h> diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 72cf7603d51f..f5bcded3640d 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -138,15 +138,14 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, op_flags |= REQ_FUA | REQ_PREFLUSH; op_flags |= REQ_SYNC; - bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set); - bio_set_dev(bio, bdev->md_bdev); + bio = bio_alloc_bioset(bdev->md_bdev, 1, op | op_flags, GFP_NOIO, + &drbd_md_io_bio_set); bio->bi_iter.bi_sector = sector; err = -EIO; if (bio_add_page(bio, device->md_io.page, size, 0) != size) goto out; bio->bi_private = device; bio->bi_end_io = drbd_md_endio; - bio_set_op_attrs(bio, op, op_flags); if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL) /* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */ diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index c1f816f896a8..df25eecf80af 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -976,12 +976,13 @@ static void drbd_bm_endio(struct bio *bio) static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local) { - struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set); struct drbd_device *device = ctx->device; + unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE; + struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op, + GFP_NOIO, &drbd_md_io_bio_set); struct drbd_bitmap *b = device->bitmap; struct page *page; unsigned int len; - unsigned int op = (ctx->flags & BM_AIO_READ) ? 
REQ_OP_READ : REQ_OP_WRITE; sector_t on_disk_sector = device->ldev->md.md_offset + device->ldev->md.bm_offset; @@ -1006,14 +1007,12 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho bm_store_page_idx(page, page_nr); } else page = b->bm_pages[page_nr]; - bio_set_dev(bio, device->ldev->md_bdev); bio->bi_iter.bi_sector = on_disk_sector; /* bio_add_page of a single page to an empty bio will always succeed, * according to api. Do we want to assert that? */ bio_add_page(bio, page, len, 0); bio->bi_private = ctx; bio->bi_end_io = drbd_bm_endio; - bio_set_op_attrs(bio, op, 0); if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { bio_io_error(bio); diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index f27d5b0f9a0b..acb1ad3c0603 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -27,7 +27,6 @@ #include <linux/major.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> -#include <linux/genhd.h> #include <linux/idr.h> #include <linux/dynamic_debug.h> #include <net/tcp.h> diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 6df2539e215b..04e3ec12d8b4 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1279,16 +1279,16 @@ static void one_flush_endio(struct bio *bio) static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx) { - struct bio *bio = bio_alloc(GFP_NOIO, 0); + struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0, + REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO); struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO); - if (!bio || !octx) { - drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n"); + + if (!octx) { + drbd_warn(device, "Could not allocate a octx, CANNOT ISSUE FLUSH\n"); /* FIXME: what else can I do now? disconnecting or detaching * really does not help to improve the state of the world, either. */ - kfree(octx); - if (bio) - bio_put(bio); + bio_put(bio); ctx->error = -ENOMEM; put_ldev(device); @@ -1298,10 +1298,8 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont octx->device = device; octx->ctx = ctx; - bio_set_dev(bio, device->ldev->backing_bdev); bio->bi_private = octx; bio->bi_end_io = one_flush_endio; - bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH; device->flush_jif = jiffies; set_bit(FLUSH_PENDING, &device->flags); @@ -1646,7 +1644,6 @@ int drbd_submit_peer_request(struct drbd_device *device, unsigned data_size = peer_req->i.size; unsigned n_bios = 0; unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; - int err = -ENOMEM; /* TRIM/DISCARD: for now, always use the helper function * blkdev_issue_zeroout(..., discard=true). @@ -1687,15 +1684,10 @@ int drbd_submit_peer_request(struct drbd_device *device, * generated bio, but a bio allocated on behalf of the peer. 
*/ next_bio: - bio = bio_alloc(GFP_NOIO, nr_pages); - if (!bio) { - drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages); - goto fail; - } + bio = bio_alloc(device->ldev->backing_bdev, nr_pages, op | op_flags, + GFP_NOIO); /* > peer_req->i.sector, unless this is the first bio */ bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, device->ldev->backing_bdev); - bio_set_op_attrs(bio, op, op_flags); bio->bi_private = peer_req; bio->bi_end_io = drbd_peer_request_endio; @@ -1726,14 +1718,6 @@ next_bio: drbd_submit_bio_noacct(device, fault_type, bio); } while (bios); return 0; - -fail: - while (bios) { - bio = bios; - bios = bios->bi_next; - bio_put(bio); - } - return err; } static void drbd_remove_epoch_entry_interval(struct drbd_device *device, diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 3235532ae077..c00ae8619519 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -30,7 +30,8 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio return NULL; memset(req, 0, sizeof(*req)); - req->private_bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set); + req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, bio_src, + GFP_NOIO, &drbd_io_bio_set); req->private_bio->bi_private = req; req->private_bio->bi_end_io = drbd_request_endio; @@ -1151,8 +1152,6 @@ drbd_submit_req_private_bio(struct drbd_request *req) else type = DRBD_FAULT_DT_RD; - bio_set_dev(bio, device->ldev->backing_bdev); - /* State may have changed since we grabbed our reference on the * ->ldev member. Double check, and short-circuit to endio. * In case the last activity log transaction failed to get on diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 64563bfdf0da..a5e04b38006b 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1523,9 +1523,9 @@ int w_restart_disk_io(struct drbd_work *w, int cancel) if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG) drbd_al_begin_io(device, &req->i); - req->private_bio = bio_clone_fast(req->master_bio, GFP_NOIO, + req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, + req->master_bio, GFP_NOIO, &drbd_io_bio_set); - bio_set_dev(req->private_bio, device->ldev->backing_bdev); req->private_bio->bi_private = req; req->private_bio->bi_end_io = drbd_request_endio; submit_bio_noacct(req->private_bio); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index e611411a934c..19c2d0327e15 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4129,15 +4129,13 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive) cbdata.drive = drive; - bio_init(&bio, &bio_vec, 1); - bio_set_dev(&bio, bdev); + bio_init(&bio, bdev, &bio_vec, 1, REQ_OP_READ); bio_add_page(&bio, page, block_size(bdev), 0); bio.bi_iter.bi_sector = 0; bio.bi_flags |= (1 << BIO_QUIET); bio.bi_private = &cbdata; bio.bi_end_io = floppy_rb0_cb; - bio_set_op_attrs(&bio, REQ_OP_READ, 0); init_completion(&cbdata.complete); diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 2b588b62cbbb..4fbaf0b4958b 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -19,7 +19,6 @@ #include <linux/compat.h> #include <linux/fs.h> #include <linux/module.h> -#include <linux/genhd.h> #include <linux/blkdev.h> #include <linux/blk-mq.h> #include <linux/bio.h> @@ -161,9 +160,7 @@ static bool mtip_check_surprise_removal(struct 
driver_data *dd) static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd, unsigned int tag) { - struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0]; - - return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(hctx->tags, tag)); + return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(dd->tags.tags[0], tag)); } /* diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 88f4206310e4..6816beb45352 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -15,7 +15,6 @@ #include <linux/rwsem.h> #include <linux/ata.h> #include <linux/interrupt.h> -#include <linux/genhd.h> /* Offset of Subsystem Device ID in pci confoguration space */ #define PCI_SUBSYSTEM_DEVICEID 0x2E diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 2b6b70a39e76..e745fc29e55d 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -1020,9 +1020,8 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) continue; bio = pkt->r_bios[f]; - bio_reset(bio); + bio_reset(bio, pd->bdev, REQ_OP_READ); bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); - bio_set_dev(bio, pd->bdev); bio->bi_end_io = pkt_end_io_read; bio->bi_private = pkt; @@ -1034,7 +1033,6 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) BUG(); atomic_inc(&pkt->io_wait); - bio_set_op_attrs(bio, REQ_OP_READ, 0); pkt_queue_bio(pd, bio); frames_read++; } @@ -1235,9 +1233,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) { int f; - bio_reset(pkt->w_bio); + bio_reset(pkt->w_bio, pd->bdev, REQ_OP_WRITE); pkt->w_bio->bi_iter.bi_sector = pkt->sector; - bio_set_dev(pkt->w_bio, pd->bdev); pkt->w_bio->bi_end_io = pkt_end_io_packet_write; pkt->w_bio->bi_private = pkt; @@ -1270,7 +1267,6 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) /* Start the write request */ atomic_set(&pkt->io_wait, 1); - bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0); pkt_queue_bio(pd, pkt->w_bio); } @@ -2298,12 +2294,12 @@ static void pkt_end_io_read_cloned(struct bio *bio) static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio) { - struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, &pkt_bio_set); + struct bio *cloned_bio = + bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set); struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO); psd->pd = pd; psd->bio = bio; - bio_set_dev(cloned_bio, pd->bdev); cloned_bio->bi_private = psd; cloned_bio->bi_end_io = pkt_end_io_read_cloned; pd->stats.secs_r += bio_sectors(bio); @@ -2404,18 +2400,11 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio) static void pkt_submit_bio(struct bio *bio) { - struct pktcdvd_device *pd; - char b[BDEVNAME_SIZE]; + struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata; struct bio *split; blk_queue_split(&bio); - pd = bio->bi_bdev->bd_disk->queue->queuedata; - if (!pd) { - pr_err("%s incorrect request queue\n", bio_devname(bio, b)); - goto end_io; - } - pkt_dbg(2, pd, "start = %6llx stop = %6llx\n", (unsigned long long)bio->bi_iter.bi_sector, (unsigned long long)bio_end_sector(bio)); diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c index c08971de369f..58304f978e10 100644 --- a/drivers/block/rnbd/rnbd-clt.c +++ b/drivers/block/rnbd/rnbd-clt.c @@ -1343,7 +1343,7 @@ static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev, static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev) { - int i; + unsigned 
long i; struct blk_mq_hw_ctx *hctx; struct rnbd_queue *q; diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c index b241a099aeae..c5d0a0391165 100644 --- a/drivers/block/rnbd/rnbd-srv-dev.c +++ b/drivers/block/rnbd/rnbd-srv-dev.c @@ -12,8 +12,7 @@ #include "rnbd-srv-dev.h" #include "rnbd-log.h" -struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags, - struct bio_set *bs) +struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags) { struct rnbd_dev *dev; int ret; @@ -30,7 +29,6 @@ struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags, dev->blk_open_flags = flags; bdevname(dev->bdev, dev->name); - dev->ibd_bio_set = bs; return dev; @@ -44,60 +42,3 @@ void rnbd_dev_close(struct rnbd_dev *dev) blkdev_put(dev->bdev, dev->blk_open_flags); kfree(dev); } - -void rnbd_dev_bi_end_io(struct bio *bio) -{ - struct rnbd_dev_blk_io *io = bio->bi_private; - - rnbd_endio(io->priv, blk_status_to_errno(bio->bi_status)); - bio_put(bio); -} - -/** - * rnbd_bio_map_kern - map kernel address into bio - * @data: pointer to buffer to map - * @bs: bio_set to use. - * @len: length in bytes - * @gfp_mask: allocation flags for bio allocation - * - * Map the kernel address into a bio suitable for io to a block - * device. Returns an error pointer in case of error. - */ -struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs, - unsigned int len, gfp_t gfp_mask) -{ - unsigned long kaddr = (unsigned long)data; - unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; - unsigned long start = kaddr >> PAGE_SHIFT; - const int nr_pages = end - start; - int offset, i; - struct bio *bio; - - bio = bio_alloc_bioset(gfp_mask, nr_pages, bs); - if (!bio) - return ERR_PTR(-ENOMEM); - - offset = offset_in_page(kaddr); - for (i = 0; i < nr_pages; i++) { - unsigned int bytes = PAGE_SIZE - offset; - - if (len <= 0) - break; - - if (bytes > len) - bytes = len; - - if (bio_add_page(bio, virt_to_page(data), bytes, - offset) < bytes) { - /* we don't support partial mappings */ - bio_put(bio); - return ERR_PTR(-EINVAL); - } - - data += bytes; - len -= bytes; - offset = 0; - } - - return bio; -} diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h index 0eb23850afb9..2c3df02b5e8e 100644 --- a/drivers/block/rnbd/rnbd-srv-dev.h +++ b/drivers/block/rnbd/rnbd-srv-dev.h @@ -14,25 +14,16 @@ struct rnbd_dev { struct block_device *bdev; - struct bio_set *ibd_bio_set; fmode_t blk_open_flags; char name[BDEVNAME_SIZE]; }; -struct rnbd_dev_blk_io { - struct rnbd_dev *dev; - void *priv; - /* have to be last member for front_pad usage of bioset_init */ - struct bio bio; -}; - /** * rnbd_dev_open() - Open a device + * @path: path to open * @flags: open flags - * @bs: bio_set to use during block io, */ -struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags, - struct bio_set *bs); +struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags); /** * rnbd_dev_close() - Close a device @@ -41,11 +32,6 @@ void rnbd_dev_close(struct rnbd_dev *dev); void rnbd_endio(void *priv, int error); -void rnbd_dev_bi_end_io(struct bio *bio); - -struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs, - unsigned int len, gfp_t gfp_mask); - static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev) { return queue_max_segments(bdev_get_queue(dev->bdev)); diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c index 4db98e0e76f0..feaa76c5a342 100644 --- a/drivers/block/rnbd/rnbd-srv-sysfs.c +++ 
b/drivers/block/rnbd/rnbd-srv-sysfs.c @@ -13,7 +13,6 @@ #include <linux/kobject.h> #include <linux/sysfs.h> #include <linux/stat.h> -#include <linux/genhd.h> #include <linux/list.h> #include <linux/moduleparam.h> #include <linux/device.h> diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c index 1ee808fc600c..132e950685d5 100644 --- a/drivers/block/rnbd/rnbd-srv.c +++ b/drivers/block/rnbd/rnbd-srv.c @@ -114,6 +114,12 @@ rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess) return sess_dev; } +static void rnbd_dev_bi_end_io(struct bio *bio) +{ + rnbd_endio(bio->bi_private, blk_status_to_errno(bio->bi_status)); + bio_put(bio); +} + static int process_rdma(struct rnbd_srv_session *srv_sess, struct rtrs_srv_op *id, void *data, u32 datalen, const void *usr, size_t usrlen) @@ -123,7 +129,6 @@ static int process_rdma(struct rnbd_srv_session *srv_sess, struct rnbd_srv_sess_dev *sess_dev; u32 dev_id; int err; - struct rnbd_dev_blk_io *io; struct bio *bio; short prio; @@ -144,33 +149,29 @@ static int process_rdma(struct rnbd_srv_session *srv_sess, priv->sess_dev = sess_dev; priv->id = id; - /* Generate bio with pages pointing to the rdma buffer */ - bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, datalen, GFP_KERNEL); - if (IS_ERR(bio)) { - err = PTR_ERR(bio); - rnbd_srv_err(sess_dev, "Failed to generate bio, err: %d\n", err); - goto sess_dev_put; + bio = bio_alloc(sess_dev->rnbd_dev->bdev, 1, + rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL); + if (bio_add_page(bio, virt_to_page(data), datalen, + offset_in_page(data)) != datalen) { + rnbd_srv_err(sess_dev, "Failed to map data to bio\n"); + err = -EINVAL; + goto bio_put; } - io = container_of(bio, struct rnbd_dev_blk_io, bio); - io->dev = sess_dev->rnbd_dev; - io->priv = priv; - bio->bi_end_io = rnbd_dev_bi_end_io; - bio->bi_private = io; - bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw)); + bio->bi_private = priv; bio->bi_iter.bi_sector = le64_to_cpu(msg->sector); bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size); prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR || usrlen < sizeof(*msg) ? 
0 : le16_to_cpu(msg->prio); bio_set_prio(bio, prio); - bio_set_dev(bio, sess_dev->rnbd_dev->bdev); submit_bio(bio); return 0; -sess_dev_put: +bio_put: + bio_put(bio); rnbd_put_sess_dev(sess_dev); err: kfree(priv); @@ -251,7 +252,6 @@ static void destroy_sess(struct rnbd_srv_session *srv_sess) out: xa_destroy(&srv_sess->index_idr); - bioset_exit(&srv_sess->sess_bio_set); pr_info("RTRS Session %s disconnected\n", srv_sess->sessname); @@ -280,16 +280,6 @@ static int create_sess(struct rtrs_srv_sess *rtrs) return -ENOMEM; srv_sess->queue_depth = rtrs_srv_get_queue_depth(rtrs); - err = bioset_init(&srv_sess->sess_bio_set, srv_sess->queue_depth, - offsetof(struct rnbd_dev_blk_io, bio), - BIOSET_NEED_BVECS); - if (err) { - pr_err("Allocating srv_session for path %s failed\n", - pathname); - kfree(srv_sess); - return err; - } - xa_init_flags(&srv_sess->index_idr, XA_FLAGS_ALLOC); INIT_LIST_HEAD(&srv_sess->sess_dev_list); mutex_init(&srv_sess->lock); @@ -738,8 +728,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess, goto reject; } - rnbd_dev = rnbd_dev_open(full_path, open_flags, - &srv_sess->sess_bio_set); + rnbd_dev = rnbd_dev_open(full_path, open_flags); if (IS_ERR(rnbd_dev)) { pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %ld\n", full_path, srv_sess->sessname, PTR_ERR(rnbd_dev)); diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h index e5604bce123a..be2ae486d407 100644 --- a/drivers/block/rnbd/rnbd-srv.h +++ b/drivers/block/rnbd/rnbd-srv.h @@ -23,7 +23,6 @@ struct rnbd_srv_session { struct rtrs_srv_sess *rtrs; char sessname[NAME_MAX]; int queue_depth; - struct bio_set sess_bio_set; struct xarray index_idr; /* List of struct rnbd_srv_sess_dev */ diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 146d85d80e0e..dd0a1a6fed29 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -9,7 +9,6 @@ #include <linux/types.h> #include <linux/blk-mq.h> #include <linux/hdreg.h> -#include <linux/genhd.h> #include <linux/cdrom.h> #include <linux/slab.h> #include <linux/spinlock.h> diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 8c415be86732..35399a9eae0e 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -69,13 +69,6 @@ struct virtio_blk { /* Process context for config space updates */ struct work_struct config_work; - /* - * Tracks references from block_device_operations open/release and - * virtio_driver probe/remove so this object can be freed once no - * longer in use. - */ - refcount_t refs; - /* Ida index - used to track minor number allocations. 
*/ int index; @@ -386,43 +379,6 @@ out: return err; } -static void virtblk_get(struct virtio_blk *vblk) -{ - refcount_inc(&vblk->refs); -} - -static void virtblk_put(struct virtio_blk *vblk) -{ - if (refcount_dec_and_test(&vblk->refs)) { - ida_simple_remove(&vd_index_ida, vblk->index); - mutex_destroy(&vblk->vdev_mutex); - kfree(vblk); - } -} - -static int virtblk_open(struct block_device *bd, fmode_t mode) -{ - struct virtio_blk *vblk = bd->bd_disk->private_data; - int ret = 0; - - mutex_lock(&vblk->vdev_mutex); - - if (vblk->vdev) - virtblk_get(vblk); - else - ret = -ENXIO; - - mutex_unlock(&vblk->vdev_mutex); - return ret; -} - -static void virtblk_release(struct gendisk *disk, fmode_t mode) -{ - struct virtio_blk *vblk = disk->private_data; - - virtblk_put(vblk); -} - /* We provide getgeo only to please some old bootloader/partitioning tools */ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) { @@ -455,11 +411,19 @@ out: return ret; } +static void virtblk_free_disk(struct gendisk *disk) +{ + struct virtio_blk *vblk = disk->private_data; + + ida_simple_remove(&vd_index_ida, vblk->index); + mutex_destroy(&vblk->vdev_mutex); + kfree(vblk); +} + static const struct block_device_operations virtblk_fops = { - .owner = THIS_MODULE, - .open = virtblk_open, - .release = virtblk_release, - .getgeo = virtblk_getgeo, + .owner = THIS_MODULE, + .getgeo = virtblk_getgeo, + .free_disk = virtblk_free_disk, }; static int index_to_minor(int index) @@ -784,8 +748,6 @@ static int virtblk_probe(struct virtio_device *vdev) goto out_free_index; } - /* This reference is dropped in virtblk_remove(). */ - refcount_set(&vblk->refs, 1); mutex_init(&vblk->vdev_mutex); vblk->vdev = vdev; @@ -968,7 +930,7 @@ static void virtblk_remove(struct virtio_device *vdev) flush_work(&vblk->config_work); del_gendisk(vblk->disk); - blk_cleanup_disk(vblk->disk); + blk_cleanup_queue(vblk->disk->queue); blk_mq_free_tag_set(&vblk->tag_set); mutex_lock(&vblk->vdev_mutex); @@ -984,7 +946,7 @@ static void virtblk_remove(struct virtio_device *vdev) mutex_unlock(&vblk->vdev_mutex); - virtblk_put(vblk); + put_disk(vblk->disk); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 14e452896d04..d1e26461a64e 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -1326,16 +1326,13 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, pages[i]->page, seg[i].nsec << 9, seg[i].offset) == 0)) { - bio = bio_alloc(GFP_KERNEL, bio_max_segs(nseg - i)); - if (unlikely(bio == NULL)) - goto fail_put_bio; - + bio = bio_alloc(preq.bdev, bio_max_segs(nseg - i), + operation | operation_flags, + GFP_KERNEL); biolist[nbio++] = bio; - bio_set_dev(bio, preq.bdev); bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; bio->bi_iter.bi_sector = preq.sector_number; - bio_set_op_attrs(bio, operation, operation_flags); } preq.sector_number += seg[i].nsec; @@ -1345,15 +1342,11 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, if (!bio) { BUG_ON(operation_flags != REQ_PREFLUSH); - bio = bio_alloc(GFP_KERNEL, 0); - if (unlikely(bio == NULL)) - goto fail_put_bio; - + bio = bio_alloc(preq.bdev, 0, operation | operation_flags, + GFP_KERNEL); biolist[nbio++] = bio; - bio_set_dev(bio, preq.bdev); bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; - bio_set_op_attrs(bio, operation, operation_flags); } atomic_set(&pending_req->pendcnt, nbio); @@ -1381,14 +1374,6 @@ static int 
dispatch_rw_block_io(struct xen_blkif_ring *ring, free_req(ring, pending_req); msleep(1); /* back off a bit */ return -EIO; - - fail_put_bio: - for (i = 0; i < nbio; i++) - bio_put(biolist[i]); - atomic_set(&pending_req->pendcnt, 1); - __end_block_io_op(pending_req, BLK_STS_RESOURCE); - msleep(1); /* back off a bit */ - return -EIO; } diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index cb253d80d72b..a3a5e1e71326 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -22,7 +22,6 @@ #include <linux/blkdev.h> #include <linux/buffer_head.h> #include <linux/device.h> -#include <linux/genhd.h> #include <linux/highmem.h> #include <linux/slab.h> #include <linux/backing-dev.h> @@ -617,24 +616,21 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec, { struct bio *bio; - bio = bio_alloc(GFP_NOIO, 1); + bio = bio_alloc(zram->bdev, 1, parent ? parent->bi_opf : REQ_OP_READ, + GFP_NOIO); if (!bio) return -ENOMEM; bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9); - bio_set_dev(bio, zram->bdev); if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) { bio_put(bio); return -EIO; } - if (!parent) { - bio->bi_opf = REQ_OP_READ; + if (!parent) bio->bi_end_io = zram_page_end_io; - } else { - bio->bi_opf = parent->bi_opf; + else bio_chain(bio, parent); - } submit_bio(bio); return 1; @@ -747,10 +743,9 @@ static ssize_t writeback_store(struct device *dev, continue; } - bio_init(&bio, &bio_vec, 1); - bio_set_dev(&bio, zram->bdev); + bio_init(&bio, zram->bdev, &bio_vec, 1, + REQ_OP_WRITE | REQ_SYNC); bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9); - bio.bi_opf = REQ_OP_WRITE | REQ_SYNC; bio_add_page(&bio, bvec.bv_page, bvec.bv_len, bvec.bv_offset); diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index faead41709bc..8e78b37d0f6a 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -15,7 +15,6 @@ #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/cdrom.h> -#include <linux/genhd.h> #include <linux/bio.h> #include <linux/blk-mq.h> #include <linux/interrupt.h> diff --git a/drivers/char/random.c b/drivers/char/random.c index 0bdefada7453..85f4cca6e707 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -38,7 +38,7 @@ #include <linux/poll.h> #include <linux/init.h> #include <linux/fs.h> -#include <linux/genhd.h> +#include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/nodemask.h> diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index b5ea378e66cb..998a5cfdbc4e 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -204,6 +204,7 @@ config BLK_DEV_DM tristate "Device mapper support" select BLOCK_HOLDER_DEPRECATED if SYSFS select BLK_DEV_DM_BUILTIN + select BLK_MQ_STACKING depends on DAX || DAX=n help Device-mapper is a low level volume manager. 
It works by allowing diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 9c6f9ec55b72..020712c5203f 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -26,7 +26,8 @@ struct bio *bch_bbio_alloc(struct cache_set *c) struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO); struct bio *bio = &b->bio; - bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb)); + bio_init(bio, NULL, bio->bi_inline_vecs, + meta_bucket_pages(&c->cache->sb), 0); return bio; } diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 61bd79babf7a..7c2ca52ca3e4 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -53,14 +53,12 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list, reread: left = ca->sb.bucket_size - offset; len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS); - bio_reset(bio); + bio_reset(bio, ca->bdev, REQ_OP_READ); bio->bi_iter.bi_sector = bucket + offset; - bio_set_dev(bio, ca->bdev); bio->bi_iter.bi_size = len << 9; bio->bi_end_io = journal_read_endio; bio->bi_private = &cl; - bio_set_op_attrs(bio, REQ_OP_READ, 0); bch_bio_map(bio, data); closure_bio_submit(ca->set, bio, &cl); @@ -611,11 +609,9 @@ static void do_journal_discard(struct cache *ca) atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); - bio_init(bio, bio->bi_inline_vecs, 1); - bio_set_op_attrs(bio, REQ_OP_DISCARD, 0); + bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1, REQ_OP_DISCARD); bio->bi_iter.bi_sector = bucket_to_sector(ca->set, ca->sb.d[ja->discard_idx]); - bio_set_dev(bio, ca->bdev); bio->bi_iter.bi_size = bucket_bytes(ca); bio->bi_end_io = journal_discard_endio; @@ -773,16 +769,14 @@ static void journal_write_unlocked(struct closure *cl) atomic_long_add(sectors, &ca->meta_sectors_written); - bio_reset(bio); + bio_reset(bio, ca->bdev, REQ_OP_WRITE | + REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA); + bch_bio_map(bio, w->data); bio->bi_iter.bi_sector = PTR_OFFSET(k, i); - bio_set_dev(bio, ca->bdev); bio->bi_iter.bi_size = sectors << 9; bio->bi_end_io = journal_write_endio; bio->bi_private = w; - bio_set_op_attrs(bio, REQ_OP_WRITE, - REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA); - bch_bio_map(bio, w->data); trace_bcache_journal_write(bio, w->data->keys); bio_list_add(&list, bio); diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index b9c3d27ec093..99499d1f6e66 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -79,8 +79,8 @@ static void moving_init(struct moving_io *io) { struct bio *bio = &io->bio.bio; - bio_init(bio, bio->bi_inline_vecs, - DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS)); + bio_init(bio, NULL, bio->bi_inline_vecs, + DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0); bio_get(bio); bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index d15aae6c51c1..6869e010475a 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -685,8 +685,7 @@ static void do_bio_hook(struct search *s, { struct bio *bio = &s->bio.bio; - bio_init(bio, NULL, 0); - __bio_clone_fast(bio, orig_bio); + bio_init_clone(bio->bi_bdev, bio, orig_bio, GFP_NOIO); /* * bi_end_io can be set separately somewhere else, e.g. 
the * variants in, @@ -831,11 +830,11 @@ static void cached_dev_read_done(struct closure *cl) */ if (s->iop.bio) { - bio_reset(s->iop.bio); + bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ); s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector; - bio_copy_dev(s->iop.bio, s->cache_miss); s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; + bio_clone_blkg_association(s->iop.bio, s->cache_miss); bch_bio_map(s->iop.bio, NULL); bio_copy_data(s->cache_miss, s->iop.bio); @@ -913,14 +912,13 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, /* btree_search_recurse()'s btree iterator is no good anymore */ ret = miss == bio ? MAP_DONE : -EINTR; - cache_bio = bio_alloc_bioset(GFP_NOWAIT, + cache_bio = bio_alloc_bioset(miss->bi_bdev, DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS), - &dc->disk.bio_split); + 0, GFP_NOWAIT, &dc->disk.bio_split); if (!cache_bio) goto out_submit; cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector; - bio_copy_dev(cache_bio, miss); cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; cache_bio->bi_end_io = backing_request_endio; @@ -1025,21 +1023,21 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) */ struct bio *flush; - flush = bio_alloc_bioset(GFP_NOIO, 0, - &dc->disk.bio_split); + flush = bio_alloc_bioset(bio->bi_bdev, 0, + REQ_OP_WRITE | REQ_PREFLUSH, + GFP_NOIO, &dc->disk.bio_split); if (!flush) { s->iop.status = BLK_STS_RESOURCE; goto insert_data; } - bio_copy_dev(flush, bio); flush->bi_end_io = backing_request_endio; flush->bi_private = cl; - flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; /* I/O request sent to backing device */ closure_bio_submit(s->iop.c, flush, cl); } } else { - s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split); + s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, + &dc->disk.bio_split); /* I/O request sent to backing device */ bio->bi_end_io = backing_request_endio; closure_bio_submit(s->iop.c, bio, cl); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 140f35dc0c45..bf3de149d3c9 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -18,7 +18,6 @@ #include <linux/blkdev.h> #include <linux/pagemap.h> #include <linux/debugfs.h> -#include <linux/genhd.h> #include <linux/idr.h> #include <linux/kthread.h> #include <linux/workqueue.h> @@ -343,8 +342,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) down(&dc->sb_write_mutex); closure_init(cl, parent); - bio_init(bio, dc->sb_bv, 1); - bio_set_dev(bio, dc->bdev); + bio_init(bio, dc->bdev, dc->sb_bv, 1, 0); bio->bi_end_io = write_bdev_super_endio; bio->bi_private = dc; @@ -387,8 +385,7 @@ void bcache_write_super(struct cache_set *c) if (ca->sb.version < version) ca->sb.version = version; - bio_init(bio, ca->sb_bv, 1); - bio_set_dev(bio, ca->bdev); + bio_init(bio, ca->bdev, ca->sb_bv, 1, 0); bio->bi_end_io = write_super_endio; bio->bi_private = ca; @@ -2240,7 +2237,7 @@ static int cache_alloc(struct cache *ca) __module_get(THIS_MODULE); kobject_init(&ca->kobj, &bch_cache_ktype); - bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8); + bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0); /* * when ca->sb.njournal_buckets is not zero, journal exists, diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index c7560f66dca8..d42301e6309d 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -292,8 +292,8 @@ static void dirty_init(struct keybuf_key *w) struct 
dirty_io *io = w->private; struct bio *bio = &io->bio; - bio_init(bio, bio->bi_inline_vecs, - DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)); + bio_init(bio, NULL, bio->bi_inline_vecs, + DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0); if (!io->dc->writeback_percent) bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 447d030036d1..89fdfb49d564 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -744,21 +744,14 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) spin_unlock_irq(&cache->lock); } -static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, - dm_oblock_t oblock, bool bio_has_pbd) -{ - if (bio_has_pbd) - check_if_tick_bio_needed(cache, bio); - remap_to_origin(cache, bio); - if (bio_data_dir(bio) == WRITE) - clear_discard(cache, oblock_to_dblock(cache, oblock)); -} - static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, dm_oblock_t oblock) { // FIXME: check_if_tick_bio_needed() is called way too much through this interface - __remap_to_origin_clear_discard(cache, bio, oblock, true); + check_if_tick_bio_needed(cache, bio); + remap_to_origin(cache, bio); + if (bio_data_dir(bio) == WRITE) + clear_discard(cache, oblock_to_dblock(cache, oblock)); } static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, @@ -826,16 +819,15 @@ static void issue_op(struct bio *bio, void *context) static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio, dm_oblock_t oblock, dm_cblock_t cblock) { - struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs); + struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio, + GFP_NOIO, &cache->bs); BUG_ON(!origin_bio); bio_chain(origin_bio, bio); - /* - * Passing false to __remap_to_origin_clear_discard() skips - * all code that might use per_bio_data (since clone doesn't have it) - */ - __remap_to_origin_clear_discard(cache, origin_bio, oblock, false); + + if (bio_data_dir(origin_bio) == WRITE) + clear_discard(cache, oblock_to_dblock(cache, oblock)); submit_bio(origin_bio); remap_to_cache(cache, bio, cblock); diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index b855fef4f38a..72d18c3fbf1f 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -11,7 +11,6 @@ #include <linux/kthread.h> #include <linux/ktime.h> -#include <linux/genhd.h> #include <linux/blk-mq.h> #include <linux/blk-crypto-profile.h> diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index d4ae31558826..e2b0af4a2ee8 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -234,7 +234,7 @@ static volatile unsigned long dm_crypt_pages_per_client; #define DM_CRYPT_MEMORY_PERCENT 2 #define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16) -static void clone_init(struct dm_crypt_io *, struct bio *); +static void crypt_endio(struct bio *clone); static void kcryptd_queue_crypt(struct dm_crypt_io *io); static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc, struct scatterlist *sg); @@ -1364,11 +1364,10 @@ static int crypt_convert_block_aead(struct crypt_config *cc, } if (r == -EBADMSG) { - char b[BDEVNAME_SIZE]; sector_t s = le64_to_cpu(*sector); - DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", - bio_devname(ctx->bio_in, b), s); + DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu", + ctx->bio_in->bi_bdev, s); dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead", ctx->bio_in, s, 0); } @@ -1672,11 +1671,10 @@ retry: 
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) mutex_lock(&cc->bio_alloc_lock); - clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs); - if (!clone) - goto out; - - clone_init(io, clone); + clone = bio_alloc_bioset(cc->dev->bdev, nr_iovecs, io->base_bio->bi_opf, + GFP_NOIO, &cc->bs); + clone->bi_private = io; + clone->bi_end_io = crypt_endio; remaining_size = size; @@ -1702,7 +1700,7 @@ retry: bio_put(clone); clone = NULL; } -out: + if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) mutex_unlock(&cc->bio_alloc_lock); @@ -1829,34 +1827,25 @@ static void crypt_endio(struct bio *clone) crypt_dec_pending(io); } -static void clone_init(struct dm_crypt_io *io, struct bio *clone) -{ - struct crypt_config *cc = io->cc; - - clone->bi_private = io; - clone->bi_end_io = crypt_endio; - bio_set_dev(clone, cc->dev->bdev); - clone->bi_opf = io->base_bio->bi_opf; -} - static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) { struct crypt_config *cc = io->cc; struct bio *clone; /* - * We need the original biovec array in order to decrypt - * the whole bio data *afterwards* -- thanks to immutable - * biovecs we don't need to worry about the block layer - * modifying the biovec array; so leverage bio_clone_fast(). + * We need the original biovec array in order to decrypt the whole bio + * data *afterwards* -- thanks to immutable biovecs we don't need to + * worry about the block layer modifying the biovec array; so leverage + * bio_alloc_clone(). */ - clone = bio_clone_fast(io->base_bio, gfp, &cc->bs); + clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs); if (!clone) return 1; + clone->bi_private = io; + clone->bi_end_io = crypt_endio; crypt_inc_pending(io); - clone_init(io, clone); clone->bi_iter.bi_sector = cc->start + io->sector; if (dm_crypt_integrity_io_alloc(io, clone)) { @@ -2179,11 +2168,10 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq); if (error == -EBADMSG) { - char b[BDEVNAME_SIZE]; sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)); - DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", - bio_devname(ctx->bio_in, b), s); + DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu", + ctx->bio_in->bi_bdev, s); dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead", ctx->bio_in, s, 0); io->error = BLK_STS_PROTECTION; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index eb4b5e52bd6f..c58a5111cb57 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -1788,12 +1788,11 @@ again: checksums_ptr - checksums, dio->op == REQ_OP_READ ? 
TAG_CMP : TAG_WRITE); if (unlikely(r)) { if (r > 0) { - char b[BDEVNAME_SIZE]; sector_t s; s = sector - ((r + ic->tag_size - 1) / ic->tag_size); - DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", - bio_devname(bio, b), s); + DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx", + bio->bi_bdev, s); r = -EILSEQ; atomic64_inc(&ic->number_of_mismatches); dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum", diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 2d3cda0acacb..23e038f8dc84 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -345,11 +345,10 @@ static void do_region(int op, int op_flags, unsigned region, (PAGE_SIZE >> SECTOR_SHIFT))); } - bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios); + bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags, + GFP_NOIO, &io->client->bios); bio->bi_iter.bi_sector = where->sector + (where->count - remaining); - bio_set_dev(bio, where->bdev); bio->bi_end_io = endio; - bio_set_op_attrs(bio, op, op_flags); store_io_and_region_in_bio(bio, io, region); if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) { diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 139b09b06eda..c9d036d6bb2e 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -217,18 +217,12 @@ static int write_metadata(struct log_writes_c *lc, void *entry, void *ptr; size_t ret; - bio = bio_alloc(GFP_KERNEL, 1); - if (!bio) { - DMERR("Couldn't alloc log bio"); - goto error; - } + bio = bio_alloc(lc->logdev->bdev, 1, REQ_OP_WRITE, GFP_KERNEL); bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, lc->logdev->bdev); bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ? log_end_super : log_end_io; bio->bi_private = lc; - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); page = alloc_page(GFP_KERNEL); if (!page) { @@ -275,18 +269,12 @@ static int write_inline_data(struct log_writes_c *lc, void *entry, atomic_inc(&lc->io_blocks); - bio = bio_alloc(GFP_KERNEL, bio_pages); - if (!bio) { - DMERR("Couldn't alloc inline data bio"); - goto error; - } - + bio = bio_alloc(lc->logdev->bdev, bio_pages, REQ_OP_WRITE, + GFP_KERNEL); bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, lc->logdev->bdev); bio->bi_end_io = log_end_io; bio->bi_private = lc; - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); for (i = 0; i < bio_pages; i++) { pg_datalen = min_t(int, datalen, PAGE_SIZE); @@ -322,7 +310,6 @@ static int write_inline_data(struct log_writes_c *lc, void *entry, error_bio: bio_free_pages(bio); bio_put(bio); -error: put_io_block(lc); return -1; } @@ -363,17 +350,12 @@ static int log_one_block(struct log_writes_c *lc, goto out; atomic_inc(&lc->io_blocks); - bio = bio_alloc(GFP_KERNEL, bio_max_segs(block->vec_cnt)); - if (!bio) { - DMERR("Couldn't alloc log bio"); - goto error; - } + bio = bio_alloc(lc->logdev->bdev, bio_max_segs(block->vec_cnt), + REQ_OP_WRITE, GFP_KERNEL); bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, lc->logdev->bdev); bio->bi_end_io = log_end_io; bio->bi_private = lc; - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); for (i = 0; i < block->vec_cnt; i++) { /* @@ -385,18 +367,13 @@ static int log_one_block(struct log_writes_c *lc, if (ret != block->vecs[i].bv_len) { atomic_inc(&lc->io_blocks); submit_bio(bio); - bio = bio_alloc(GFP_KERNEL, - bio_max_segs(block->vec_cnt - i)); - if (!bio) { - DMERR("Couldn't alloc log bio"); - goto error; - } + bio = bio_alloc(lc->logdev->bdev, + bio_max_segs(block->vec_cnt - i), + REQ_OP_WRITE, GFP_KERNEL); 
bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, lc->logdev->bdev); bio->bi_end_io = log_end_io; bio->bi_private = lc; - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); ret = bio_add_page(bio, block->vecs[i].bv_page, block->vecs[i].bv_len, 0); diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 579ab6183d4d..6948d5db9092 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -303,21 +303,6 @@ static void end_clone_request(struct request *clone, blk_status_t error) dm_complete_request(tio->orig, error); } -static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq) -{ - blk_status_t r; - - if (blk_queue_io_stat(clone->q)) - clone->rq_flags |= RQF_IO_STAT; - - clone->start_time_ns = ktime_get_ns(); - r = blk_insert_cloned_request(clone->q, clone); - if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE) - /* must complete clone in terms of original request */ - dm_complete_request(rq, r); - return r; -} - static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, void *data) { @@ -398,13 +383,20 @@ static int map_request(struct dm_rq_target_io *tio) /* The target has remapped the I/O so dispatch it */ trace_block_rq_remap(clone, disk_devt(dm_disk(md)), blk_rq_pos(rq)); - ret = dm_dispatch_clone_request(clone, rq); - if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) { + ret = blk_insert_cloned_request(clone); + switch (ret) { + case BLK_STS_OK: + break; + case BLK_STS_RESOURCE: + case BLK_STS_DEV_RESOURCE: blk_rq_unprep_clone(clone); blk_mq_cleanup_rq(clone); tio->ti->type->release_clone_rq(clone, &tio->info); tio->clone = NULL; return DM_MAPIO_REQUEUE; + default: + /* must complete clone in terms of original request */ + dm_complete_request(rq, ret); } break; case DM_MAPIO_REQUEUE: diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index dcf34c6b05ad..0d336b5ec571 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -141,11 +141,6 @@ struct dm_snapshot { * for them to be committed. */ struct bio_list bios_queued_during_merge; - - /* - * Flush data after merge. 
- */ - struct bio flush_bio; }; /* @@ -1127,17 +1122,6 @@ shut: static void error_bios(struct bio *bio); -static int flush_data(struct dm_snapshot *s) -{ - struct bio *flush_bio = &s->flush_bio; - - bio_reset(flush_bio); - bio_set_dev(flush_bio, s->origin->bdev); - flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; - - return submit_bio_wait(flush_bio); -} - static void merge_callback(int read_err, unsigned long write_err, void *context) { struct dm_snapshot *s = context; @@ -1151,7 +1135,7 @@ static void merge_callback(int read_err, unsigned long write_err, void *context) goto shut; } - if (flush_data(s) < 0) { + if (blkdev_issue_flush(s->origin->bdev) < 0) { DMERR("Flush after merge failed: shutting down merge"); goto shut; } @@ -1340,7 +1324,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->first_merging_chunk = 0; s->num_merging_chunks = 0; bio_list_init(&s->bios_queued_during_merge); - bio_init(&s->flush_bio, NULL, 0); /* Allocate hash table for COW data */ if (init_hash_tables(s)) { @@ -1528,8 +1511,6 @@ static void snapshot_dtr(struct dm_target *ti) dm_exception_store_destroy(s->store); - bio_uninit(&s->flush_bio); - dm_put_device(ti, s->cow); dm_put_device(ti, s->origin); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index ec119d2422d5..f4234d615aa1 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -282,8 +282,6 @@ struct pool { struct dm_bio_prison_cell **cell_sort_array; mempool_t mapping_pool; - - struct bio flush_bio; }; static void metadata_operation_failed(struct pool *pool, const char *op, int r); @@ -1179,25 +1177,17 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m) return; } - discard_parent = bio_alloc(GFP_NOIO, 1); - if (!discard_parent) { - DMWARN("%s: unable to allocate top level discard bio for passdown. 
Skipping passdown.", - dm_device_name(tc->pool->pool_md)); - queue_passdown_pt2(m); - - } else { - discard_parent->bi_end_io = passdown_endio; - discard_parent->bi_private = m; - - if (m->maybe_shared) - passdown_double_checking_shared_status(m, discard_parent); - else { - struct discard_op op; - - begin_discard(&op, tc, discard_parent); - r = issue_discard(&op, m->data_block, data_end); - end_discard(&op, r); - } + discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO); + discard_parent->bi_end_io = passdown_endio; + discard_parent->bi_private = m; + if (m->maybe_shared) + passdown_double_checking_shared_status(m, discard_parent); + else { + struct discard_op op; + + begin_discard(&op, tc, discard_parent); + r = issue_discard(&op, m->data_block, data_end); + end_discard(&op, r); } } @@ -2913,7 +2903,6 @@ static void __pool_destroy(struct pool *pool) if (pool->next_mapping) mempool_free(pool->next_mapping, &pool->mapping_pool); mempool_exit(&pool->mapping_pool); - bio_uninit(&pool->flush_bio); dm_deferred_set_destroy(pool->shared_read_ds); dm_deferred_set_destroy(pool->all_io_ds); kfree(pool); @@ -2994,7 +2983,6 @@ static struct pool *pool_create(struct mapped_device *pool_md, pool->low_water_triggered = false; pool->suspended = true; pool->out_of_data_space = false; - bio_init(&pool->flush_bio, NULL, 0); pool->shared_read_ds = dm_deferred_set_create(); if (!pool->shared_read_ds) { @@ -3201,13 +3189,8 @@ static void metadata_low_callback(void *context) static int metadata_pre_commit_callback(void *context) { struct pool *pool = context; - struct bio *flush_bio = &pool->flush_bio; - - bio_reset(flush_bio); - bio_set_dev(flush_bio, pool->data_dev); - flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; - return submit_bio_wait(flush_bio); + return blkdev_issue_flush(pool->data_dev); } static sector_t get_dev_size(struct block_device *bdev) diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 4f31591d2d25..5630b470ba42 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -1821,11 +1821,11 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba max_pages = e->wc_list_contiguous; - bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set); + bio = bio_alloc_bioset(wc->dev->bdev, max_pages, REQ_OP_WRITE, + GFP_NOIO, &wc->bio_set); wb = container_of(bio, struct writeback_struct, bio); wb->wc = wc; bio->bi_end_io = writecache_writeback_endio; - bio_set_dev(bio, wc->dev->bdev); bio->bi_iter.bi_sector = read_original_sector(wc, e); if (max_pages <= WB_LIST_INLINE || unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *), @@ -1852,7 +1852,8 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba wb->wc_list[wb->wc_list_n++] = f; e = f; } - bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA); + if (WC_MODE_FUA(wc)) + bio->bi_opf |= REQ_FUA; if (writecache_has_error(wc)) { bio->bi_status = BLK_STS_IOERR; bio_endio(bio); diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index ee4626d08557..e5f1eb27ce2e 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -550,11 +550,8 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd, if (!mblk) return ERR_PTR(-ENOMEM); - bio = bio_alloc(GFP_NOIO, 1); - if (!bio) { - dmz_free_mblock(zmd, mblk); - return ERR_PTR(-ENOMEM); - } + bio = bio_alloc(dev->bdev, 1, REQ_OP_READ | REQ_META | REQ_PRIO, + GFP_NOIO); spin_lock(&zmd->mblk_lock); @@ -578,10 +575,8 @@ static struct 
dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd, /* Submit read BIO */ bio->bi_iter.bi_sector = dmz_blk2sect(block); - bio_set_dev(bio, dev->bdev); bio->bi_private = mblk; bio->bi_end_io = dmz_mblock_bio_end_io; - bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO); bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); submit_bio(bio); @@ -725,19 +720,14 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, if (dmz_bdev_is_dying(dev)) return -EIO; - bio = bio_alloc(GFP_NOIO, 1); - if (!bio) { - set_bit(DMZ_META_ERROR, &mblk->state); - return -ENOMEM; - } + bio = bio_alloc(dev->bdev, 1, REQ_OP_WRITE | REQ_META | REQ_PRIO, + GFP_NOIO); set_bit(DMZ_META_WRITING, &mblk->state); bio->bi_iter.bi_sector = dmz_blk2sect(block); - bio_set_dev(bio, dev->bdev); bio->bi_private = mblk; bio->bi_end_io = dmz_mblock_bio_end_io; - bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); submit_bio(bio); @@ -759,13 +749,9 @@ static int dmz_rdwr_block(struct dmz_dev *dev, int op, if (dmz_bdev_is_dying(dev)) return -EIO; - bio = bio_alloc(GFP_NOIO, 1); - if (!bio) - return -ENOMEM; - + bio = bio_alloc(dev->bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO, + GFP_NOIO); bio->bi_iter.bi_sector = dmz_blk2sect(block); - bio_set_dev(bio, dev->bdev); - bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO); bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0); ret = submit_bio_wait(bio); bio_put(bio); diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 166c4e9d99c9..a3f6d3ef3817 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -125,11 +125,10 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, if (dev->flags & DMZ_BDEV_DYING) return -EIO; - clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set); + clone = bio_alloc_clone(dev->bdev, bio, GFP_NOIO, &dmz->bio_set); if (!clone) return -ENOMEM; - bio_set_dev(clone, dev->bdev); bioctx->dev = dev; clone->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 997ace47bbd5..183ce0d6728f 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -79,10 +79,14 @@ struct clone_info { #define DM_IO_BIO_OFFSET \ (offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio)) +static inline struct dm_target_io *clone_to_tio(struct bio *clone) +{ + return container_of(clone, struct dm_target_io, clone); +} + void *dm_per_bio_data(struct bio *bio, size_t data_size) { - struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); - if (!tio->inside_dm_io) + if (!clone_to_tio(bio)->inside_dm_io) return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size; return (char *)bio - DM_IO_BIO_OFFSET - data_size; } @@ -477,10 +481,7 @@ out: u64 dm_start_time_ns_from_clone(struct bio *bio) { - struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); - struct dm_io *io = tio->io; - - return jiffies_to_nsecs(io->start_time); + return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time); } EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone); @@ -519,11 +520,9 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) struct dm_target_io *tio; struct bio *clone; - clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs); - if (!clone) - return NULL; + clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, &md->io_bs); - tio = container_of(clone, struct dm_target_io, clone); + tio = clone_to_tio(clone); tio->inside_dm_io = true; 
tio->io = NULL; @@ -545,8 +544,8 @@ static void free_io(struct mapped_device *md, struct dm_io *io) bio_put(&io->tio.clone); } -static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti, - unsigned target_bio_nr, gfp_t gfp_mask) +static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, + unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask) { struct dm_target_io *tio; @@ -554,11 +553,12 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *t /* the dm_target_io embedded in ci->io is available */ tio = &ci->io->tio; } else { - struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs); + struct bio *clone = bio_alloc_clone(ci->bio->bi_bdev, ci->bio, + gfp_mask, &ci->io->md->bs); if (!clone) return NULL; - tio = container_of(clone, struct dm_target_io, clone); + tio = clone_to_tio(clone); tio->inside_dm_io = false; } @@ -566,15 +566,16 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *t tio->io = ci->io; tio->ti = ti; tio->target_bio_nr = target_bio_nr; + tio->len_ptr = len; - return tio; + return &tio->clone; } -static void free_tio(struct dm_target_io *tio) +static void free_tio(struct bio *clone) { - if (tio->inside_dm_io) + if (clone_to_tio(clone)->inside_dm_io) return; - bio_put(&tio->clone); + bio_put(clone); } /* @@ -879,7 +880,7 @@ static bool swap_bios_limit(struct dm_target *ti, struct bio *bio) static void clone_endio(struct bio *bio) { blk_status_t error = bio->bi_status; - struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); + struct dm_target_io *tio = clone_to_tio(bio); struct dm_io *io = tio->io; struct mapped_device *md = tio->io->md; dm_endio_fn endio = tio->ti->type->end_io; @@ -930,7 +931,7 @@ static void clone_endio(struct bio *bio) up(&md->swap_bios_semaphore); } - free_tio(tio); + free_tio(bio); dm_io_dec_pending(io, error); } @@ -1085,7 +1086,7 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, */ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) { - struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); + struct dm_target_io *tio = clone_to_tio(bio); unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; BUG_ON(bio->bi_opf & REQ_PREFLUSH); @@ -1115,11 +1116,11 @@ static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) mutex_unlock(&md->swap_bios_lock); } -static void __map_bio(struct dm_target_io *tio) +static void __map_bio(struct bio *clone) { + struct dm_target_io *tio = clone_to_tio(clone); int r; sector_t sector; - struct bio *clone = &tio->clone; struct dm_io *io = tio->io; struct dm_target *ti = tio->ti; @@ -1164,7 +1165,7 @@ static void __map_bio(struct dm_target_io *tio) struct mapped_device *md = io->md; up(&md->swap_bios_semaphore); } - free_tio(tio); + free_tio(clone); dm_io_dec_pending(io, BLK_STS_IOERR); break; case DM_MAPIO_REQUEUE: @@ -1172,7 +1173,7 @@ static void __map_bio(struct dm_target_io *tio) struct mapped_device *md = io->md; up(&md->swap_bios_semaphore); } - free_tio(tio); + free_tio(clone); dm_io_dec_pending(io, BLK_STS_DM_REQUEUE); break; default: @@ -1190,106 +1191,75 @@ static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) /* * Creates a bio that consists of range of complete bvecs. 
*/ -static int clone_bio(struct dm_target_io *tio, struct bio *bio, - sector_t sector, unsigned len) +static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, + sector_t sector, unsigned *len) { - struct bio *clone = &tio->clone; - int r; - - __bio_clone_fast(clone, bio); - - r = bio_crypt_clone(clone, bio, GFP_NOIO); - if (r < 0) - return r; - - if (bio_integrity(bio)) { - if (unlikely(!dm_target_has_integrity(tio->ti->type) && - !dm_target_passes_integrity(tio->ti->type))) { - DMWARN("%s: the target %s doesn't support integrity data.", - dm_device_name(tio->io->md), - tio->ti->type->name); - return -EIO; - } - - r = bio_integrity_clone(clone, bio, GFP_NOIO); - if (r < 0) - return r; - } + struct bio *bio = ci->bio, *clone; + clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); - clone->bi_iter.bi_size = to_bytes(len); + clone->bi_iter.bi_size = to_bytes(*len); if (bio_integrity(bio)) bio_integrity_trim(clone); + __map_bio(clone); return 0; } static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, - struct dm_target *ti, unsigned num_bios) + struct dm_target *ti, unsigned num_bios, + unsigned *len) { - struct dm_target_io *tio; + struct bio *bio; int try; - if (!num_bios) - return; - - if (num_bios == 1) { - tio = alloc_tio(ci, ti, 0, GFP_NOIO); - bio_list_add(blist, &tio->clone); - return; - } - for (try = 0; try < 2; try++) { int bio_nr; - struct bio *bio; if (try) mutex_lock(&ci->io->md->table_devices_lock); for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { - tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT); - if (!tio) + bio = alloc_tio(ci, ti, bio_nr, len, + try ? GFP_NOIO : GFP_NOWAIT); + if (!bio) break; - bio_list_add(blist, &tio->clone); + bio_list_add(blist, bio); } if (try) mutex_unlock(&ci->io->md->table_devices_lock); if (bio_nr == num_bios) return; - while ((bio = bio_list_pop(blist))) { - tio = container_of(bio, struct dm_target_io, clone); - free_tio(tio); - } + while ((bio = bio_list_pop(blist))) + free_tio(bio); } } -static void __clone_and_map_simple_bio(struct clone_info *ci, - struct dm_target_io *tio, unsigned *len) -{ - struct bio *clone = &tio->clone; - - tio->len_ptr = len; - - __bio_clone_fast(clone, ci->bio); - if (len) - bio_setup_sector(clone, ci->sector, *len); - __map_bio(tio); -} - static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, unsigned num_bios, unsigned *len) { struct bio_list blist = BIO_EMPTY_LIST; - struct bio *bio; - struct dm_target_io *tio; - - alloc_multiple_bios(&blist, ci, ti, num_bios); + struct bio *clone; - while ((bio = bio_list_pop(&blist))) { - tio = container_of(bio, struct dm_target_io, clone); - __clone_and_map_simple_bio(ci, tio, len); + switch (num_bios) { + case 0: + break; + case 1: + clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); + if (len) + bio_setup_sector(clone, ci->sector, *len); + __map_bio(clone); + break; + default: + alloc_multiple_bios(&blist, ci, ti, num_bios, len); + while ((clone = bio_list_pop(&blist))) { + if (len) + bio_setup_sector(clone, ci->sector, *len); + __map_bio(clone); + } + break; } } @@ -1304,9 +1274,8 @@ static int __send_empty_flush(struct clone_info *ci) * need to reference it after submit. It's just used as * the basis for the clone(s). 
*/ - bio_init(&flush_bio, NULL, 0); - flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; - bio_set_dev(&flush_bio, ci->io->md->disk->part0); + bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, + REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); ci->bio = &flush_bio; ci->sector_count = 0; @@ -1319,25 +1288,6 @@ static int __send_empty_flush(struct clone_info *ci) return 0; } -static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, - sector_t sector, unsigned *len) -{ - struct bio *bio = ci->bio; - struct dm_target_io *tio; - int r; - - tio = alloc_tio(ci, ti, 0, GFP_NOIO); - tio->len_ptr = len; - r = clone_bio(tio, bio, sector, *len); - if (r < 0) { - free_tio(tio); - return r; - } - __map_bio(tio); - - return 0; -} - static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, unsigned num_bios) { diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c index c0dc6f2ef4a3..50ad818978a4 100644 --- a/drivers/md/md-faulty.c +++ b/drivers/md/md-faulty.c @@ -205,9 +205,9 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio) } } if (failit) { - struct bio *b = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); + struct bio *b = bio_alloc_clone(conf->rdev->bdev, bio, GFP_NOIO, + &mddev->bio_set); - bio_set_dev(b, conf->rdev->bdev); b->bi_private = bio; b->bi_end_io = faulty_fail; bio = b; diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c index e7d6486f090f..3081a936350d 100644 --- a/drivers/md/md-multipath.c +++ b/drivers/md/md-multipath.c @@ -121,11 +121,9 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio) } multipath = conf->multipaths + mp_bh->path; - bio_init(&mp_bh->bio, NULL, 0); - __bio_clone_fast(&mp_bh->bio, bio); + bio_init_clone(multipath->rdev->bdev, &mp_bh->bio, bio, GFP_NOIO); mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; - bio_set_dev(&mp_bh->bio, multipath->rdev->bdev); mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT; mp_bh->bio.bi_end_io = multipath_end_request; mp_bh->bio.bi_private = mp_bh; @@ -299,7 +297,6 @@ static void multipathd(struct md_thread *thread) md_check_recovery(mddev); for (;;) { - char b[BDEVNAME_SIZE]; spin_lock_irqsave(&conf->device_lock, flags); if (list_empty(head)) break; @@ -311,13 +308,13 @@ static void multipathd(struct md_thread *thread) bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector; if ((mp_bh->path = multipath_map (conf))<0) { - pr_err("multipath: %s: unrecoverable IO read error for block %llu\n", - bio_devname(bio, b), + pr_err("multipath: %pg: unrecoverable IO read error for block %llu\n", + bio->bi_bdev, (unsigned long long)bio->bi_iter.bi_sector); multipath_end_bh_io(mp_bh, BLK_STS_IOERR); } else { - pr_err("multipath: %s: redirecting sector %llu to another IO path\n", - bio_devname(bio, b), + pr_err("multipath: %pg: redirecting sector %llu to another IO path\n", + bio->bi_bdev, (unsigned long long)bio->bi_iter.bi_sector); *bio = *(mp_bh->master_bio); bio->bi_iter.bi_sector += diff --git a/drivers/md/md.c b/drivers/md/md.c index 4d38bd7dadd6..f210a55af201 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -562,11 +562,11 @@ static void submit_flushes(struct work_struct *ws) atomic_inc(&rdev->nr_pending); atomic_inc(&rdev->nr_pending); rcu_read_unlock(); - bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set); + bi = bio_alloc_bioset(rdev->bdev, 0, + REQ_OP_WRITE | REQ_PREFLUSH, + GFP_NOIO, &mddev->bio_set); bi->bi_end_io = md_end_flush; bi->bi_private = rdev; - bio_set_dev(bi, rdev->bdev); - bi->bi_opf = 
REQ_OP_WRITE | REQ_PREFLUSH; atomic_inc(&mddev->flush_pending); submit_bio(bi); rcu_read_lock(); @@ -955,7 +955,6 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, * If an error occurred, call md_error */ struct bio *bio; - int ff = 0; if (!page) return; @@ -963,11 +962,13 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, if (test_bit(Faulty, &rdev->flags)) return; - bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set); + bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev, + 1, + REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA, + GFP_NOIO, &mddev->sync_set); atomic_inc(&rdev->nr_pending); - bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev); bio->bi_iter.bi_sector = sector; bio_add_page(bio, page, size, 0); bio->bi_private = rdev; @@ -976,8 +977,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && test_bit(FailFast, &rdev->flags) && !test_bit(LastDev, &rdev->flags)) - ff = MD_FAILFAST; - bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff; + bio->bi_opf |= MD_FAILFAST; atomic_inc(&mddev->pending_writes); submit_bio(bio); @@ -998,13 +998,11 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, struct bio bio; struct bio_vec bvec; - bio_init(&bio, &bvec, 1); - if (metadata_op && rdev->meta_bdev) - bio_set_dev(&bio, rdev->meta_bdev); + bio_init(&bio, rdev->meta_bdev, &bvec, 1, op | op_flags); else - bio_set_dev(&bio, rdev->bdev); - bio.bi_opf = op | op_flags; + bio_init(&bio, rdev->bdev, &bvec, 1, op | op_flags); + if (metadata_op) bio.bi_iter.bi_sector = sector + rdev->sb_start; else if (rdev->mddev->reshape_position != MaxSector && @@ -8636,13 +8634,14 @@ static void md_end_io_acct(struct bio *bio) */ void md_account_bio(struct mddev *mddev, struct bio **bio) { + struct block_device *bdev = (*bio)->bi_bdev; struct md_io_acct *md_io_acct; struct bio *clone; - if (!blk_queue_io_stat((*bio)->bi_bdev->bd_disk->queue)) + if (!blk_queue_io_stat(bdev->bd_disk->queue)) return; - clone = bio_clone_fast(*bio, GFP_NOIO, &mddev->io_acct_set); + clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_acct_set); md_io_acct = container_of(clone, struct md_io_acct, bio_clone); md_io_acct->orig_bio = *bio; md_io_acct->start_time = bio_start_io_acct(*bio); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index e2d8acb1e988..03477e971699 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1126,7 +1126,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio, int i = 0; struct bio *behind_bio = NULL; - behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set); + behind_bio = bio_alloc_bioset(NULL, vcnt, 0, GFP_NOIO, + &r1_bio->mddev->bio_set); if (!behind_bio) return; @@ -1319,13 +1320,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, if (!r1bio_existed && blk_queue_io_stat(bio->bi_bdev->bd_disk->queue)) r1_bio->start_time = bio_start_io_acct(bio); - read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); + read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp, + &mddev->bio_set); r1_bio->bios[rdisk] = read_bio; read_bio->bi_iter.bi_sector = r1_bio->sector + mirror->rdev->data_offset; - bio_set_dev(read_bio, mirror->rdev->bdev); read_bio->bi_end_io = raid1_end_read_request; bio_set_op_attrs(read_bio, op, do_sync); if (test_bit(FailFast, &mirror->rdev->flags) && @@ -1545,24 +1546,25 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, first_clone = 0; } - if 
(r1_bio->behind_master_bio) - mbio = bio_clone_fast(r1_bio->behind_master_bio, - GFP_NOIO, &mddev->bio_set); - else - mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); - if (r1_bio->behind_master_bio) { + mbio = bio_alloc_clone(rdev->bdev, + r1_bio->behind_master_bio, + GFP_NOIO, &mddev->bio_set); if (test_bit(CollisionCheck, &rdev->flags)) wait_for_serialization(rdev, r1_bio); if (test_bit(WriteMostly, &rdev->flags)) atomic_inc(&r1_bio->behind_remaining); - } else if (mddev->serialize_policy) - wait_for_serialization(rdev, r1_bio); + } else { + mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, + &mddev->bio_set); + + if (mddev->serialize_policy) + wait_for_serialization(rdev, r1_bio); + } r1_bio->bios[i] = mbio; mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset); - bio_set_dev(mbio, rdev->bdev); mbio->bi_end_io = raid1_end_write_request; mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA)); if (test_bit(FailFast, &rdev->flags) && @@ -2070,15 +2072,14 @@ static int fix_sync_read_error(struct r1bio *r1_bio) } while (!success && d != r1_bio->read_disk); if (!success) { - char b[BDEVNAME_SIZE]; int abort = 0; /* Cannot read from anywhere, this block is lost. * Record a bad block on each device. If that doesn't * work just disable and interrupt the recovery. * Don't fail devices as that won't really help. */ - pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", - mdname(mddev), bio_devname(bio, b), + pr_crit_ratelimited("md/raid1:%s: %pg: unrecoverable I/O read error for block %llu\n", + mdname(mddev), bio->bi_bdev, (unsigned long long)r1_bio->sector); for (d = 0; d < conf->raid_disks * 2; d++) { rdev = conf->mirrors[d].rdev; @@ -2165,11 +2166,10 @@ static void process_checks(struct r1bio *r1_bio) continue; /* fixup the bio for reuse, but preserve errno */ status = b->bi_status; - bio_reset(b); + bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ); b->bi_status = status; b->bi_iter.bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; - bio_set_dev(b, conf->mirrors[i].rdev->bdev); b->bi_end_io = end_sync_read; rp->raid_bio = r1_bio; b->bi_private = rp; @@ -2416,12 +2416,12 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) /* Write at 'sector' for 'sectors'*/ if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { - wbio = bio_clone_fast(r1_bio->behind_master_bio, - GFP_NOIO, - &mddev->bio_set); + wbio = bio_alloc_clone(rdev->bdev, + r1_bio->behind_master_bio, + GFP_NOIO, &mddev->bio_set); } else { - wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, - &mddev->bio_set); + wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio, + GFP_NOIO, &mddev->bio_set); } bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); @@ -2430,7 +2430,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) bio_trim(wbio, sector - r1_bio->sector, sectors); wbio->bi_iter.bi_sector += rdev->data_offset; - bio_set_dev(wbio, rdev->bdev); if (submit_bio_wait(wbio) < 0) /* failure! 
*/ @@ -2650,7 +2649,7 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) for (i = conf->poolinfo->raid_disks; i--; ) { bio = r1bio->bios[i]; rps = bio->bi_private; - bio_reset(bio); + bio_reset(bio, NULL, 0); bio->bi_private = rps; } r1bio->master_bio = NULL; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 2b969f70a31f..5dd2e17e1d0e 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1208,14 +1208,13 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue)) r10_bio->start_time = bio_start_io_acct(bio); - read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); + read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set); r10_bio->devs[slot].bio = read_bio; r10_bio->devs[slot].rdev = rdev; read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + choose_data_offset(r10_bio, rdev); - bio_set_dev(read_bio, rdev->bdev); read_bio->bi_end_io = raid10_end_read_request; bio_set_op_attrs(read_bio, op, do_sync); if (test_bit(FailFast, &rdev->flags) && @@ -1255,7 +1254,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, } else rdev = conf->mirrors[devnum].rdev; - mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); + mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set); if (replacement) r10_bio->devs[n_copy].repl_bio = mbio; else @@ -1263,7 +1262,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + choose_data_offset(r10_bio, rdev)); - bio_set_dev(mbio, rdev->bdev); mbio->bi_end_io = raid10_end_write_request; bio_set_op_attrs(mbio, op, do_sync | do_fua); if (!replacement && test_bit(FailFast, @@ -1812,7 +1810,8 @@ retry_discard: */ if (r10_bio->devs[disk].bio) { struct md_rdev *rdev = conf->mirrors[disk].rdev; - mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); + mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, + &mddev->bio_set); mbio->bi_end_io = raid10_end_discard_request; mbio->bi_private = r10_bio; r10_bio->devs[disk].bio = mbio; @@ -1825,7 +1824,8 @@ retry_discard: } if (r10_bio->devs[disk].repl_bio) { struct md_rdev *rrdev = conf->mirrors[disk].replacement; - rbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); + rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, + &mddev->bio_set); rbio->bi_end_io = raid10_end_discard_request; rbio->bi_private = r10_bio; r10_bio->devs[disk].repl_bio = rbio; @@ -2422,7 +2422,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) * bi_vecs, as the read request might have corrupted these */ rp = get_resync_pages(tbio); - bio_reset(tbio); + bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE); md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size); @@ -2430,7 +2430,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) tbio->bi_private = rp; tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; tbio->bi_end_io = end_sync_write; - bio_set_op_attrs(tbio, REQ_OP_WRITE, 0); bio_copy_data(tbio, fbio); @@ -2441,7 +2440,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) tbio->bi_opf |= MD_FAILFAST; tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; - bio_set_dev(tbio, conf->mirrors[d].rdev->bdev); submit_bio_noacct(tbio); } @@ -2894,12 +2892,12 @@ static int narrow_write_error(struct r10bio *r10_bio, int i) if (sectors > sect_to_write) sectors = sect_to_write; /* 
Write at 'sector' for 'sectors' */ - wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); + wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, + &mddev->bio_set); bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); wbio->bi_iter.bi_sector = wsector + choose_data_offset(r10_bio, rdev); - bio_set_dev(wbio, rdev->bdev); bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); if (submit_bio_wait(wbio) < 0) @@ -3160,12 +3158,12 @@ static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf) for (i = 0; i < nalloc; i++) { bio = r10bio->devs[i].bio; rp = bio->bi_private; - bio_reset(bio); + bio_reset(bio, NULL, 0); bio->bi_private = rp; bio = r10bio->devs[i].repl_bio; if (bio) { rp = bio->bi_private; - bio_reset(bio); + bio_reset(bio, NULL, 0); bio->bi_private = rp; } } @@ -4892,14 +4890,12 @@ read_more: return sectors_done; } - read_bio = bio_alloc_bioset(GFP_KERNEL, RESYNC_PAGES, &mddev->bio_set); - - bio_set_dev(read_bio, rdev->bdev); + read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ, + GFP_KERNEL, &mddev->bio_set); read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr + rdev->data_offset); read_bio->bi_private = r10_bio; read_bio->bi_end_io = end_reshape_read; - bio_set_op_attrs(read_bio, REQ_OP_READ, 0); r10_bio->master_bio = read_bio; r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 0b5dcaabbc15..86e2bb89d9c7 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -735,10 +735,9 @@ static void r5l_submit_current_io(struct r5l_log *log) static struct bio *r5l_bio_alloc(struct r5l_log *log) { - struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS, &log->bs); + struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS, + REQ_OP_WRITE, GFP_NOIO, &log->bs); - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); - bio_set_dev(bio, log->rdev->bdev); bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; return bio; @@ -1302,10 +1301,9 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log) if (!do_flush) return; - bio_reset(&log->flush_bio); - bio_set_dev(&log->flush_bio, log->rdev->bdev); + bio_reset(&log->flush_bio, log->rdev->bdev, + REQ_OP_WRITE | REQ_PREFLUSH); log->flush_bio.bi_end_io = r5l_log_flush_endio; - log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; submit_bio(&log->flush_bio); } @@ -1634,7 +1632,8 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log, { struct page *page; - ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_VECS, &log->bs); + ctx->ra_bio = bio_alloc_bioset(NULL, BIO_MAX_VECS, 0, GFP_KERNEL, + &log->bs); if (!ctx->ra_bio) return -ENOMEM; @@ -1678,9 +1677,7 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log, struct r5l_recovery_ctx *ctx, sector_t offset) { - bio_reset(ctx->ra_bio); - bio_set_dev(ctx->ra_bio, log->rdev->bdev); - bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0); + bio_reset(ctx->ra_bio, log->rdev->bdev, REQ_OP_READ); ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset; ctx->valid_pages = 0; @@ -3108,7 +3105,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) INIT_LIST_HEAD(&log->io_end_ios); INIT_LIST_HEAD(&log->flushing_ios); INIT_LIST_HEAD(&log->finished_ios); - bio_init(&log->flush_bio, NULL, 0); + bio_init(&log->flush_bio, NULL, NULL, 0, 0); log->io_kc = KMEM_CACHE(r5l_io_unit, 0); if (!log->io_kc) diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 4ab417915d7f..bbb5673104ec 100644 --- 
a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -250,7 +250,7 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log, INIT_LIST_HEAD(&io->stripe_list); atomic_set(&io->pending_stripes, 0); atomic_set(&io->pending_flushes, 0); - bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS); + bio_init(&io->bio, NULL, io->biovec, PPL_IO_INLINE_BVECS, 0); pplhdr = page_address(io->header_page); clear_page(pplhdr); @@ -416,12 +416,10 @@ static void ppl_log_endio(struct bio *bio) static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio) { - char b[BDEVNAME_SIZE]; - - pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n", + pr_debug("%s: seq: %llu size: %u sector: %llu dev: %pg\n", __func__, io->seq, bio->bi_iter.bi_size, (unsigned long long)bio->bi_iter.bi_sector, - bio_devname(bio, b)); + bio->bi_bdev); submit_bio(bio); } @@ -496,11 +494,10 @@ static void ppl_submit_iounit(struct ppl_io_unit *io) if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) { struct bio *prev = bio; - bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS, + bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS, + prev->bi_opf, GFP_NOIO, &ppl_conf->bs); - bio->bi_opf = prev->bi_opf; bio->bi_write_hint = prev->bi_write_hint; - bio_copy_dev(bio, prev); bio->bi_iter.bi_sector = bio_end_sector(prev); bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0); @@ -590,9 +587,8 @@ static void ppl_flush_endio(struct bio *bio) struct ppl_log *log = io->log; struct ppl_conf *ppl_conf = log->ppl_conf; struct r5conf *conf = ppl_conf->mddev->private; - char b[BDEVNAME_SIZE]; - pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b)); + pr_debug("%s: dev: %pg\n", __func__, bio->bi_bdev); if (bio->bi_status) { struct md_rdev *rdev; @@ -635,16 +631,14 @@ static void ppl_do_flush(struct ppl_io_unit *io) if (bdev) { struct bio *bio; - char b[BDEVNAME_SIZE]; - bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs); - bio_set_dev(bio, bdev); + bio = bio_alloc_bioset(bdev, 0, GFP_NOIO, + REQ_OP_WRITE | REQ_PREFLUSH, + &ppl_conf->flush_bs); bio->bi_private = io; - bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; bio->bi_end_io = ppl_flush_endio; - pr_debug("%s: dev: %s\n", __func__, - bio_devname(bio, b)); + pr_debug("%s: dev: %ps\n", __func__, bio->bi_bdev); submit_bio(bio); flushed_disks++; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ffe720c73b0a..8891aaba6596 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2310,8 +2310,8 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp, for (i = 0; i < disks; i++) { struct r5dev *dev = &sh->dev[i]; - bio_init(&dev->req, &dev->vec, 1); - bio_init(&dev->rreq, &dev->rvec, 1); + bio_init(&dev->req, NULL, &dev->vec, 1, 0); + bio_init(&dev->rreq, NULL, &dev->rvec, 1, 0); } if (raid5_has_ppl(conf)) { @@ -2677,7 +2677,7 @@ static void raid5_end_read_request(struct bio * bi) (unsigned long long)sh->sector, i, atomic_read(&sh->count), bi->bi_status); if (i == disks) { - bio_reset(bi); + bio_reset(bi, NULL, 0); BUG(); return; } @@ -2785,7 +2785,7 @@ static void raid5_end_read_request(struct bio * bi) } } rdev_dec_pending(rdev, conf->mddev); - bio_reset(bi); + bio_reset(bi, NULL, 0); clear_bit(R5_LOCKED, &sh->dev[i].flags); set_bit(STRIPE_HANDLE, &sh->state); raid5_release_stripe(sh); @@ -2823,7 +2823,7 @@ static void raid5_end_write_request(struct bio *bi) (unsigned long long)sh->sector, i, atomic_read(&sh->count), bi->bi_status); if (i == disks) { - bio_reset(bi); + bio_reset(bi, NULL, 0); BUG(); return; } @@ -2860,7 +2860,7 @@ static void 
raid5_end_write_request(struct bio *bi) if (sh->batch_head && bi->bi_status && !replacement) set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); - bio_reset(bi); + bio_reset(bi, NULL, 0); if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) clear_bit(R5_LOCKED, &sh->dev[i].flags); set_bit(STRIPE_HANDLE, &sh->state); @@ -5438,14 +5438,14 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) return 0; } - align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->io_acct_set); + align_bio = bio_alloc_clone(rdev->bdev, raid_bio, GFP_NOIO, + &mddev->io_acct_set); md_io_acct = container_of(align_bio, struct md_io_acct, bio_clone); raid_bio->bi_next = (void *)rdev; if (blk_queue_io_stat(raid_bio->bi_bdev->bd_disk->queue)) md_io_acct->start_time = bio_start_io_acct(raid_bio); md_io_acct->orig_bio = raid_bio; - bio_set_dev(align_bio, rdev->bdev); align_bio->bi_end_io = raid5_align_endio; align_bio->bi_private = md_io_acct; align_bio->bi_iter.bi_sector = sector; diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c index 0cda6c6baefc..3993bdd4b519 100644 --- a/drivers/memstick/core/ms_block.c +++ b/drivers/memstick/core/ms_block.c @@ -1943,22 +1943,6 @@ static void msb_io_work(struct work_struct *work) static DEFINE_IDR(msb_disk_idr); /*set of used disk numbers */ static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */ -static int msb_bd_open(struct block_device *bdev, fmode_t mode) -{ - struct gendisk *disk = bdev->bd_disk; - struct msb_data *msb = disk->private_data; - - dbg_verbose("block device open"); - - mutex_lock(&msb_disk_lock); - - if (msb && msb->card) - msb->usage_count++; - - mutex_unlock(&msb_disk_lock); - return 0; -} - static void msb_data_clear(struct msb_data *msb) { kfree(msb->boot_page); @@ -1968,33 +1952,6 @@ static void msb_data_clear(struct msb_data *msb) msb->card = NULL; } -static int msb_disk_release(struct gendisk *disk) -{ - struct msb_data *msb = disk->private_data; - - dbg_verbose("block device release"); - mutex_lock(&msb_disk_lock); - - if (msb) { - if (msb->usage_count) - msb->usage_count--; - - if (!msb->usage_count) { - disk->private_data = NULL; - idr_remove(&msb_disk_idr, msb->disk_id); - put_disk(disk); - kfree(msb); - } - } - mutex_unlock(&msb_disk_lock); - return 0; -} - -static void msb_bd_release(struct gendisk *disk, fmode_t mode) -{ - msb_disk_release(disk); -} - static int msb_bd_getgeo(struct block_device *bdev, struct hd_geometry *geo) { @@ -2003,6 +1960,17 @@ static int msb_bd_getgeo(struct block_device *bdev, return 0; } +static void msb_bd_free_disk(struct gendisk *disk) +{ + struct msb_data *msb = disk->private_data; + + mutex_lock(&msb_disk_lock); + idr_remove(&msb_disk_idr, msb->disk_id); + mutex_unlock(&msb_disk_lock); + + kfree(msb); +} + static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { @@ -2096,10 +2064,9 @@ static void msb_start(struct memstick_dev *card) } static const struct block_device_operations msb_bdops = { - .open = msb_bd_open, - .release = msb_bd_release, - .getgeo = msb_bd_getgeo, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .getgeo = msb_bd_getgeo, + .free_disk = msb_bd_free_disk, }; static const struct blk_mq_ops msb_mq_ops = { @@ -2147,7 +2114,6 @@ static int msb_init_disk(struct memstick_dev *card) set_capacity(msb->disk, capacity); dbg("Set total disk size to %lu sectors", capacity); - msb->usage_count = 1; msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM); 
INIT_WORK(&msb->io_work, msb_io_work); sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1); @@ -2229,7 +2195,7 @@ static void msb_remove(struct memstick_dev *card) msb_data_clear(msb); mutex_unlock(&msb_disk_lock); - msb_disk_release(msb->disk); + put_disk(msb->disk); memstick_set_drvdata(card, NULL); } diff --git a/drivers/memstick/core/ms_block.h b/drivers/memstick/core/ms_block.h index 122e1a8a8bd5..7058f9aefeb9 100644 --- a/drivers/memstick/core/ms_block.h +++ b/drivers/memstick/core/ms_block.h @@ -143,7 +143,6 @@ struct ms_boot_page { } __packed; struct msb_data { - unsigned int usage_count; struct memstick_dev *card; struct gendisk *disk; struct request_queue *queue; diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index c0450397b673..725ba74ded30 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c @@ -133,7 +133,6 @@ struct mspro_devinfo { struct mspro_block_data { struct memstick_dev *card; - unsigned int usage_count; unsigned int caps; struct gendisk *disk; struct request_queue *queue; @@ -178,53 +177,16 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error); /*** Block device ***/ -static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode) -{ - struct gendisk *disk = bdev->bd_disk; - struct mspro_block_data *msb = disk->private_data; - int rc = -ENXIO; - - mutex_lock(&mspro_block_disk_lock); - - if (msb && msb->card) { - msb->usage_count++; - if ((mode & FMODE_WRITE) && msb->read_only) - rc = -EROFS; - else - rc = 0; - } - - mutex_unlock(&mspro_block_disk_lock); - - return rc; -} - - -static void mspro_block_disk_release(struct gendisk *disk) +static void mspro_block_bd_free_disk(struct gendisk *disk) { struct mspro_block_data *msb = disk->private_data; int disk_id = MINOR(disk_devt(disk)) >> MSPRO_BLOCK_PART_SHIFT; mutex_lock(&mspro_block_disk_lock); - - if (msb) { - if (msb->usage_count) - msb->usage_count--; - - if (!msb->usage_count) { - kfree(msb); - disk->private_data = NULL; - idr_remove(&mspro_block_disk_idr, disk_id); - put_disk(disk); - } - } - + idr_remove(&mspro_block_disk_idr, disk_id); mutex_unlock(&mspro_block_disk_lock); -} -static void mspro_block_bd_release(struct gendisk *disk, fmode_t mode) -{ - mspro_block_disk_release(disk); + kfree(msb); } static int mspro_block_bd_getgeo(struct block_device *bdev, @@ -240,10 +202,9 @@ static int mspro_block_bd_getgeo(struct block_device *bdev, } static const struct block_device_operations ms_block_bdops = { - .open = mspro_block_bd_open, - .release = mspro_block_bd_release, - .getgeo = mspro_block_bd_getgeo, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .getgeo = mspro_block_bd_getgeo, + .free_disk = mspro_block_bd_free_disk, }; /*** Information ***/ @@ -1226,7 +1187,6 @@ static int mspro_block_init_disk(struct memstick_dev *card) msb->disk->first_minor = disk_id << MSPRO_BLOCK_PART_SHIFT; msb->disk->minors = 1 << MSPRO_BLOCK_PART_SHIFT; msb->disk->fops = &ms_block_bdops; - msb->usage_count = 1; msb->disk->private_data = msb; sprintf(msb->disk->disk_name, "mspblk%d", disk_id); @@ -1239,6 +1199,9 @@ static int mspro_block_init_disk(struct memstick_dev *card) set_capacity(msb->disk, capacity); dev_dbg(&card->dev, "capacity set %ld\n", capacity); + if (msb->read_only) + set_disk_ro(msb->disk, true); + rc = device_add_disk(&card->dev, msb->disk, NULL); if (rc) goto out_cleanup_disk; @@ -1341,7 +1304,7 @@ static void mspro_block_remove(struct memstick_dev *card) mspro_block_data_clear(msb); 
mutex_unlock(&mspro_block_disk_lock); - mspro_block_disk_release(msb->disk); + put_disk(msb->disk); memstick_set_drvdata(card, NULL); } diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c index e86b04bc1d6b..dc7f1532a37f 100644 --- a/drivers/mtd/mtdswap.c +++ b/drivers/mtd/mtdswap.c @@ -19,7 +19,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> -#include <linux/genhd.h> +#include <linux/blkdev.h> #include <linux/swap.h> #include <linux/debugfs.h> #include <linux/seq_file.h> diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c index 5612ee628425..52ce5162538a 100644 --- a/drivers/mtd/nand/raw/sharpsl.c +++ b/drivers/mtd/nand/raw/sharpsl.c @@ -6,7 +6,6 @@ * Based on Sharp's NAND driver sharp_sl.c */ -#include <linux/genhd.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/delay.h> diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 228c33b8d1d6..c1db43524d75 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -6,7 +6,6 @@ #include <linux/blkdev.h> #include <linux/fs.h> -#include <linux/genhd.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/nd.h> diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index da3f007a1211..cbd994f7f1fe 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -11,7 +11,6 @@ #include <linux/device.h> #include <linux/mutex.h> #include <linux/hdreg.h> -#include <linux/genhd.h> #include <linux/sizes.h> #include <linux/ndctl.h> #include <linux/fs.h> diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 8b52e5144f08..e5a58520d398 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -4,7 +4,6 @@ */ #include <linux/blkdev.h> #include <linux/device.h> -#include <linux/genhd.h> #include <linux/sizes.h> #include <linux/slab.h> #include <linux/fs.h> diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 9dc7f3edd42b..5bbe31b08581 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -11,7 +11,6 @@ #include <linux/blkdev.h> #include <linux/fcntl.h> #include <linux/async.h> -#include <linux/genhd.h> #include <linux/ndctl.h> #include <linux/sched.h> #include <linux/slab.h> diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c index 10351d5b49fa..c6a648fd8744 100644 --- a/drivers/nvdimm/nd_virtio.c +++ b/drivers/nvdimm/nd_virtio.c @@ -105,12 +105,12 @@ int async_pmem_flush(struct nd_region *nd_region, struct bio *bio) * parent bio. Otherwise directly call nd_region flush. 
*/ if (bio && bio->bi_iter.bi_sector != -1) { - struct bio *child = bio_alloc(GFP_ATOMIC, 0); + struct bio *child = bio_alloc(bio->bi_bdev, 0, REQ_PREFLUSH, + GFP_ATOMIC); if (!child) return -ENOMEM; - bio_copy_dev(child, bio); - child->bi_opf = REQ_PREFLUSH; + bio_clone_blkg_association(child, bio); child->bi_iter.bi_sector = -1; bio_chain(child, bio); submit_bio(child); diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 58eda16f5c53..c31e184bfa45 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -5,7 +5,6 @@ #include <linux/memremap.h> #include <linux/blkdev.h> #include <linux/device.h> -#include <linux/genhd.h> #include <linux/sizes.h> #include <linux/slab.h> #include <linux/fs.h> diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 70ca9dfc1771..95c2bbb0b2f5 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -267,15 +267,15 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) if (nvmet_use_inline_bvec(req)) { bio = &req->b.inline_bio; - bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); + bio_init(bio, req->ns->bdev, req->inline_bvec, + ARRAY_SIZE(req->inline_bvec), op); } else { - bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt)); + bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), op, + GFP_KERNEL); } - bio_set_dev(bio, req->ns->bdev); bio->bi_iter.bi_sector = sector; bio->bi_private = req; bio->bi_end_io = nvmet_bio_done; - bio->bi_opf = op; blk_start_plug(&plug); if (req->metadata_len) @@ -296,10 +296,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) } } - bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt)); - bio_set_dev(bio, req->ns->bdev); + bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), + op, GFP_KERNEL); bio->bi_iter.bi_sector = sector; - bio->bi_opf = op; bio_chain(bio, prev); submit_bio(prev); @@ -328,11 +327,10 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req) if (!nvmet_check_transfer_len(req, 0)) return; - bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); - bio_set_dev(bio, req->ns->bdev); + bio_init(bio, req->ns->bdev, req->inline_bvec, + ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH); bio->bi_private = req; bio->bi_end_io = nvmet_bio_done; - bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; submit_bio(bio); } diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 9e5b89ae29df..a810bf569fff 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -206,12 +206,13 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq) if (nvmet_use_inline_bvec(req)) { bio = &req->p.inline_bio; - bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); + bio_init(bio, NULL, req->inline_bvec, + ARRAY_SIZE(req->inline_bvec), req_op(rq)); } else { - bio = bio_alloc(GFP_KERNEL, bio_max_segs(req->sg_cnt)); + bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq), + GFP_KERNEL); bio->bi_end_io = bio_put; } - bio->bi_opf = req_op(rq); for_each_sg(req->sg, sg, req->sg_cnt, i) { if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length, diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index 46bc30fe85d2..3e421217a7ad 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -412,10 +412,10 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req) while (sector < get_capacity(bdev->bd_disk)) { if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) { - bio = blk_next_bio(bio, 0, 
GFP_KERNEL); - bio->bi_opf = zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC; + bio = blk_next_bio(bio, bdev, 0, + zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC, + GFP_KERNEL); bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, bdev); /* This may take a while, so be nice to others */ cond_resched(); } @@ -522,6 +522,7 @@ static void nvmet_bdev_zone_append_bio_done(struct bio *bio) void nvmet_bdev_execute_zone_append(struct nvmet_req *req) { sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba); + const unsigned int op = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE; u16 status = NVME_SC_SUCCESS; unsigned int total_len = 0; struct scatterlist *sg; @@ -551,14 +552,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) if (nvmet_use_inline_bvec(req)) { bio = &req->z.inline_bio; - bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); + bio_init(bio, req->ns->bdev, req->inline_bvec, + ARRAY_SIZE(req->inline_bvec), op); } else { - bio = bio_alloc(GFP_KERNEL, req->sg_cnt); + bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL); } - bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE; bio->bi_end_io = nvmet_bdev_zone_append_bio_done; - bio_set_dev(bio, req->ns->bdev); bio->bi_iter.bi_sector = sect; bio->bi_private = req; if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 8b458010f88a..3b7af00a7825 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -47,7 +47,6 @@ #include <linux/module.h> #include <linux/wait.h> #include <linux/blkdev.h> -#include <linux/genhd.h> #include <linux/hdreg.h> #include <linux/interrupt.h> #include <linux/log2.h> diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 61ecdcb2cc6a..2a9c0ddcade5 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -15,7 +15,6 @@ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/blk-mq.h> -#include <linux/genhd.h> #include <linux/slab.h> #include <linux/list.h> #include <asm/eadm.h> diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h index a05a4297cfae..af82b3214774 100644 --- a/drivers/s390/block/scm_blk.h +++ b/drivers/s390/block/scm_blk.h @@ -6,7 +6,6 @@ #include <linux/spinlock.h> #include <linux/blkdev.h> #include <linux/blk-mq.h> -#include <linux/genhd.h> #include <linux/list.h> #include <asm/debug.h> diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 2104973a35cd..911cc72dd7ac 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -23,7 +23,6 @@ #include <linux/slab.h> #include <linux/types.h> #include <linux/string.h> -#include <linux/genhd.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/proc_fs.h> diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0a70aa763a96..e30bc51578e9 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1276,7 +1276,7 @@ scsi_device_state_check(struct scsi_device *sdev, struct request *req) * power management commands. 
*/ if (req && !(req->rq_flags & RQF_PM)) - return BLK_STS_IOERR; + return BLK_STS_OFFLINE; return BLK_STS_OK; } } diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c index 0ffdb8f2995f..acdc0aceca5e 100644 --- a/drivers/scsi/scsicam.c +++ b/drivers/scsi/scsicam.c @@ -14,7 +14,6 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/fs.h> -#include <linux/genhd.h> #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/pagemap.h> diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 62eb9921cc94..73e6f5f0f37c 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -38,7 +38,6 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/bio.h> -#include <linux/genhd.h> #include <linux/hdreg.h> #include <linux/errno.h> #include <linux/idr.h> @@ -122,11 +121,6 @@ static void scsi_disk_release(struct device *cdev); static DEFINE_IDA(sd_index_ida); -/* This semaphore is used to mediate the 0->1 reference get in the - * face of object destruction (i.e. we can't allow a get on an - * object after last put) */ -static DEFINE_MUTEX(sd_ref_mutex); - static struct kmem_cache *sd_cdb_cache; static mempool_t *sd_cdb_pool; static mempool_t *sd_page_pool; @@ -664,33 +658,6 @@ static int sd_major(int major_idx) } } -static struct scsi_disk *scsi_disk_get(struct gendisk *disk) -{ - struct scsi_disk *sdkp = NULL; - - mutex_lock(&sd_ref_mutex); - - if (disk->private_data) { - sdkp = scsi_disk(disk); - if (scsi_device_get(sdkp->device) == 0) - get_device(&sdkp->dev); - else - sdkp = NULL; - } - mutex_unlock(&sd_ref_mutex); - return sdkp; -} - -static void scsi_disk_put(struct scsi_disk *sdkp) -{ - struct scsi_device *sdev = sdkp->device; - - mutex_lock(&sd_ref_mutex); - put_device(&sdkp->dev); - scsi_device_put(sdev); - mutex_unlock(&sd_ref_mutex); -} - #ifdef CONFIG_BLK_SED_OPAL static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, bool send) @@ -1419,17 +1386,15 @@ static bool sd_need_revalidate(struct block_device *bdev, **/ static int sd_open(struct block_device *bdev, fmode_t mode) { - struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk); - struct scsi_device *sdev; + struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); + struct scsi_device *sdev = sdkp->device; int retval; - if (!sdkp) + if (scsi_device_get(sdev)) return -ENXIO; SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n")); - sdev = sdkp->device; - /* * If the device is in error recovery, wait until it is done. * If the device is offline, then disallow any access to it. @@ -1474,7 +1439,7 @@ static int sd_open(struct block_device *bdev, fmode_t mode) return 0; error_out: - scsi_disk_put(sdkp); + scsi_device_put(sdev); return retval; } @@ -1503,7 +1468,7 @@ static void sd_release(struct gendisk *disk, fmode_t mode) scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); } - scsi_disk_put(sdkp); + scsi_device_put(sdev); } static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo) @@ -1617,7 +1582,7 @@ static int media_not_present(struct scsi_disk *sdkp, **/ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) { - struct scsi_disk *sdkp = scsi_disk_get(disk); + struct scsi_disk *sdkp = disk->private_data; struct scsi_device *sdp; int retval; bool disk_changed; @@ -1680,7 +1645,6 @@ out: */ disk_changed = sdp->changed; sdp->changed = 0; - scsi_disk_put(sdkp); return disk_changed ? 
DISK_EVENT_MEDIA_CHANGE : 0; } @@ -1888,6 +1852,13 @@ static const struct pr_ops sd_pr_ops = { .pr_clear = sd_pr_clear, }; +static void scsi_disk_free_disk(struct gendisk *disk) +{ + struct scsi_disk *sdkp = scsi_disk(disk); + + put_device(&sdkp->disk_dev); +} + static const struct block_device_operations sd_fops = { .owner = THIS_MODULE, .open = sd_open, @@ -1899,6 +1870,7 @@ static const struct block_device_operations sd_fops = { .unlock_native_capacity = sd_unlock_native_capacity, .report_zones = sd_zbc_report_zones, .get_unique_id = sd_get_unique_id, + .free_disk = scsi_disk_free_disk, .pr_ops = &sd_pr_ops, }; @@ -3516,7 +3488,6 @@ static int sd_probe(struct device *dev) } sdkp->device = sdp; - sdkp->driver = &sd_template; sdkp->disk = gd; sdkp->index = index; sdkp->max_retries = SD_MAX_RETRIES; @@ -3531,14 +3502,14 @@ static int sd_probe(struct device *dev) SD_MOD_TIMEOUT); } - device_initialize(&sdkp->dev); - sdkp->dev.parent = get_device(dev); - sdkp->dev.class = &sd_disk_class; - dev_set_name(&sdkp->dev, "%s", dev_name(dev)); + device_initialize(&sdkp->disk_dev); + sdkp->disk_dev.parent = get_device(dev); + sdkp->disk_dev.class = &sd_disk_class; + dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev)); - error = device_add(&sdkp->dev); + error = device_add(&sdkp->disk_dev); if (error) { - put_device(&sdkp->dev); + put_device(&sdkp->disk_dev); goto out; } @@ -3549,7 +3520,7 @@ static int sd_probe(struct device *dev) gd->minors = SD_MINORS; gd->fops = &sd_fops; - gd->private_data = &sdkp->driver; + gd->private_data = sdkp; /* defaults, until the device tells us otherwise */ sdp->sector_size = 512; @@ -3579,7 +3550,7 @@ static int sd_probe(struct device *dev) error = device_add_disk(dev, gd, NULL); if (error) { - put_device(&sdkp->dev); + put_device(&sdkp->disk_dev); goto out; } @@ -3625,58 +3596,26 @@ static int sd_probe(struct device *dev) **/ static int sd_remove(struct device *dev) { - struct scsi_disk *sdkp; + struct scsi_disk *sdkp = dev_get_drvdata(dev); - sdkp = dev_get_drvdata(dev); scsi_autopm_get_device(sdkp->device); - device_del(&sdkp->dev); + device_del(&sdkp->disk_dev); del_gendisk(sdkp->disk); sd_shutdown(dev); - free_opal_dev(sdkp->opal_dev); - - mutex_lock(&sd_ref_mutex); - dev_set_drvdata(dev, NULL); - put_device(&sdkp->dev); - mutex_unlock(&sd_ref_mutex); - + put_disk(sdkp->disk); return 0; } -/** - * scsi_disk_release - Called to free the scsi_disk structure - * @dev: pointer to embedded class device - * - * sd_ref_mutex must be held entering this routine. Because it is - * called on last put, you should always use the scsi_disk_get() - * scsi_disk_put() helpers which manipulate the semaphore directly - * and never do a direct put_device. - **/ static void scsi_disk_release(struct device *dev) { struct scsi_disk *sdkp = to_scsi_disk(dev); - struct gendisk *disk = sdkp->disk; - struct request_queue *q = disk->queue; ida_free(&sd_index_ida, sdkp->index); - - /* - * Wait until all requests that are in progress have completed. - * This is necessary to avoid that e.g. scsi_end_request() crashes - * due to clearing the disk->private_data pointer. Wait from inside - * scsi_disk_release() instead of from sd_release() to avoid that - * freezing and unfreezing the request queue affects user space I/O - * in case multiple processes open a /dev/sd... node concurrently. 
- */ - blk_mq_freeze_queue(q); - blk_mq_unfreeze_queue(q); - - disk->private_data = NULL; - put_disk(disk); - put_device(&sdkp->device->sdev_gendev); - sd_zbc_release_disk(sdkp); + put_device(&sdkp->device->sdev_gendev); + free_opal_dev(sdkp->opal_dev); kfree(sdkp); } diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 2e5932bde43d..0a33a4b68ffb 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -68,9 +68,13 @@ enum { }; struct scsi_disk { - struct scsi_driver *driver; /* always &sd_template */ struct scsi_device *device; - struct device dev; + + /* + * disk_dev is used to show attributes in /sys/class/scsi_disk/, + * but otherwise not really needed. Do not use for refcounting. + */ + struct device disk_dev; struct gendisk *disk; struct opal_dev *opal_dev; #ifdef CONFIG_BLK_DEV_ZONED @@ -127,11 +131,11 @@ struct scsi_disk { unsigned security : 1; unsigned ignore_medium_access_errors : 1; }; -#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) +#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev) static inline struct scsi_disk *scsi_disk(struct gendisk *disk) { - return container_of(disk->private_data, struct scsi_disk, driver); + return disk->private_data; } #define sd_printk(prefix, sdsk, fmt, a...) \ diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index f925b1f1f9ad..641552d6330b 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -109,11 +109,6 @@ static DEFINE_SPINLOCK(sr_index_lock); static struct lock_class_key sr_bio_compl_lkclass; -/* This semaphore is used to mediate the 0->1 reference get in the - * face of object destruction (i.e. we can't allow a get on an - * object after last put) */ -static DEFINE_MUTEX(sr_ref_mutex); - static int sr_open(struct cdrom_device_info *, int); static void sr_release(struct cdrom_device_info *); @@ -143,11 +138,9 @@ static const struct cdrom_device_ops sr_dops = { .capability = SR_CAPABILITIES, }; -static void sr_kref_release(struct kref *kref); - static inline struct scsi_cd *scsi_cd(struct gendisk *disk) { - return container_of(disk->private_data, struct scsi_cd, driver); + return disk->private_data; } static int sr_runtime_suspend(struct device *dev) @@ -163,38 +156,6 @@ static int sr_runtime_suspend(struct device *dev) return 0; } -/* - * The get and put routines for the struct scsi_cd. Note this entity - * has a scsi_device pointer and owns a reference to this. 
- */ -static inline struct scsi_cd *scsi_cd_get(struct gendisk *disk) -{ - struct scsi_cd *cd = NULL; - - mutex_lock(&sr_ref_mutex); - if (disk->private_data == NULL) - goto out; - cd = scsi_cd(disk); - kref_get(&cd->kref); - if (scsi_device_get(cd->device)) { - kref_put(&cd->kref, sr_kref_release); - cd = NULL; - } - out: - mutex_unlock(&sr_ref_mutex); - return cd; -} - -static void scsi_cd_put(struct scsi_cd *cd) -{ - struct scsi_device *sdev = cd->device; - - mutex_lock(&sr_ref_mutex); - kref_put(&cd->kref, sr_kref_release); - scsi_device_put(sdev); - mutex_unlock(&sr_ref_mutex); -} - static unsigned int sr_get_events(struct scsi_device *sdev) { u8 buf[8]; @@ -522,15 +483,13 @@ static void sr_revalidate_disk(struct scsi_cd *cd) static int sr_block_open(struct block_device *bdev, fmode_t mode) { - struct scsi_cd *cd; - struct scsi_device *sdev; - int ret = -ENXIO; + struct scsi_cd *cd = scsi_cd(bdev->bd_disk); + struct scsi_device *sdev = cd->device; + int ret; - cd = scsi_cd_get(bdev->bd_disk); - if (!cd) - goto out; + if (scsi_device_get(cd->device)) + return -ENXIO; - sdev = cd->device; scsi_autopm_get_device(sdev); if (bdev_check_media_change(bdev)) sr_revalidate_disk(cd); @@ -541,9 +500,7 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode) scsi_autopm_put_device(sdev); if (ret) - scsi_cd_put(cd); - -out: + scsi_device_put(cd->device); return ret; } @@ -555,7 +512,7 @@ static void sr_block_release(struct gendisk *disk, fmode_t mode) cdrom_release(&cd->cdi, mode); mutex_unlock(&cd->lock); - scsi_cd_put(cd); + scsi_device_put(cd->device); } static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, @@ -595,18 +552,24 @@ out: static unsigned int sr_block_check_events(struct gendisk *disk, unsigned int clearing) { - unsigned int ret = 0; - struct scsi_cd *cd; + struct scsi_cd *cd = disk->private_data; - cd = scsi_cd_get(disk); - if (!cd) + if (atomic_read(&cd->device->disk_events_disable_depth)) return 0; + return cdrom_check_events(&cd->cdi, clearing); +} - if (!atomic_read(&cd->device->disk_events_disable_depth)) - ret = cdrom_check_events(&cd->cdi, clearing); +static void sr_free_disk(struct gendisk *disk) +{ + struct scsi_cd *cd = disk->private_data; - scsi_cd_put(cd); - return ret; + spin_lock(&sr_index_lock); + clear_bit(MINOR(disk_devt(disk)), sr_index_bits); + spin_unlock(&sr_index_lock); + + unregister_cdrom(&cd->cdi); + mutex_destroy(&cd->lock); + kfree(cd); } static const struct block_device_operations sr_bdops = @@ -617,6 +580,7 @@ static const struct block_device_operations sr_bdops = .ioctl = sr_block_ioctl, .compat_ioctl = blkdev_compat_ptr_ioctl, .check_events = sr_block_check_events, + .free_disk = sr_free_disk, }; static int sr_open(struct cdrom_device_info *cdi, int purpose) @@ -660,8 +624,6 @@ static int sr_probe(struct device *dev) if (!cd) goto fail; - kref_init(&cd->kref); - disk = __alloc_disk_node(sdev->request_queue, NUMA_NO_NODE, &sr_bio_compl_lkclass); if (!disk) @@ -692,7 +654,6 @@ static int sr_probe(struct device *dev) cd->device = sdev; cd->disk = disk; - cd->driver = &sr_template; cd->capacity = 0x1fffff; cd->device->changed = 1; /* force recheck CD type */ cd->media_present = 1; @@ -713,7 +674,7 @@ static int sr_probe(struct device *dev) sr_vendor_init(cd); set_capacity(disk, cd->capacity); - disk->private_data = &cd->driver; + disk->private_data = cd; if (register_cdrom(disk, &cd->cdi)) goto fail_minor; @@ -728,10 +689,8 @@ static int sr_probe(struct device *dev) sr_revalidate_disk(cd); error = 
device_add_disk(&sdev->sdev_gendev, disk, NULL); - if (error) { - kref_put(&cd->kref, sr_kref_release); - goto fail; - } + if (error) + goto unregister_cdrom; sdev_printk(KERN_DEBUG, sdev, "Attached scsi CD-ROM %s\n", cd->cdi.name); @@ -739,6 +698,8 @@ static int sr_probe(struct device *dev) return 0; +unregister_cdrom: + unregister_cdrom(&cd->cdi); fail_minor: spin_lock(&sr_index_lock); clear_bit(minor, sr_index_bits); @@ -1010,36 +971,6 @@ out_put_request: return ret; } - -/** - * sr_kref_release - Called to free the scsi_cd structure - * @kref: pointer to embedded kref - * - * sr_ref_mutex must be held entering this routine. Because it is - * called on last put, you should always use the scsi_cd_get() - * scsi_cd_put() helpers which manipulate the semaphore directly - * and never do a direct kref_put(). - **/ -static void sr_kref_release(struct kref *kref) -{ - struct scsi_cd *cd = container_of(kref, struct scsi_cd, kref); - struct gendisk *disk = cd->disk; - - spin_lock(&sr_index_lock); - clear_bit(MINOR(disk_devt(disk)), sr_index_bits); - spin_unlock(&sr_index_lock); - - unregister_cdrom(&cd->cdi); - - disk->private_data = NULL; - - put_disk(disk); - - mutex_destroy(&cd->lock); - - kfree(cd); -} - static int sr_remove(struct device *dev) { struct scsi_cd *cd = dev_get_drvdata(dev); @@ -1047,11 +978,7 @@ static int sr_remove(struct device *dev) scsi_autopm_get_device(cd->device); del_gendisk(cd->disk); - dev_set_drvdata(dev, NULL); - - mutex_lock(&sr_ref_mutex); - kref_put(&cd->kref, sr_kref_release); - mutex_unlock(&sr_ref_mutex); + put_disk(cd->disk); return 0; } diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h index 339c624e04d8..1175f2e213b5 100644 --- a/drivers/scsi/sr.h +++ b/drivers/scsi/sr.h @@ -18,8 +18,6 @@ #ifndef _SR_H #define _SR_H -#include <linux/genhd.h> -#include <linux/kref.h> #include <linux/mutex.h> #define MAX_RETRIES 3 @@ -33,7 +31,6 @@ struct scsi_device; typedef struct scsi_cd { - struct scsi_driver *driver; unsigned capacity; /* size in blocks */ struct scsi_device *device; unsigned int vendor; /* vendor code, see sr_vendor.c */ @@ -53,9 +50,6 @@ typedef struct scsi_cd { struct cdrom_device_info cdi; struct mutex lock; - /* We hold gendisk and scsi_device references on probe and use - * the refs on this kref to decide when to release them */ - struct kref kref; struct gendisk *disk; } Scsi_CD; diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index e869e90e05af..ebe9412c86f4 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -4276,7 +4276,6 @@ static int st_probe(struct device *dev) goto out_buffer_free; } kref_init(&tpnt->kref); - tpnt->driver = &st_template; tpnt->device = SDp; if (SDp->scsi_level <= 2) diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h index c0ef0d9aaf8a..7a68eaba7e81 100644 --- a/drivers/scsi/st.h +++ b/drivers/scsi/st.h @@ -117,7 +117,6 @@ struct scsi_tape_stats { /* The tape drive descriptor */ struct scsi_tape { - struct scsi_driver *driver; struct scsi_device *device; struct mutex lock; /* For serialization */ struct completion wait; /* For SCSI commands */ diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 2d36a0715fca..8970068314ef 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -494,7 +494,7 @@ static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, if (!map_req) return NULL; - bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn); + bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL); if (!bio) { ufshpb_put_req(hpb, map_req); return NULL; @@ -2050,7 +2050,7 @@ 
static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb) INIT_LIST_HEAD(&pre_req->list_req); pre_req->req = NULL; - pre_req->bio = bio_alloc(GFP_KERNEL, 1); + pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL); if (!pre_req->bio) goto release_mem; diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index bf8ae4825a06..87ede165ddba 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -20,7 +20,6 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/bio.h> -#include <linux/genhd.h> #include <linux/file.h> #include <linux/module.h> #include <linux/scatterlist.h> @@ -353,18 +352,16 @@ static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, * Only allocate as many vector entries as the bio code allows us to, * we'll loop later on until we have handled the whole request. */ - bio = bio_alloc_bioset(GFP_NOIO, bio_max_segs(sg_num), - &ib_dev->ibd_bio_set); + bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf, + GFP_NOIO, &ib_dev->ibd_bio_set); if (!bio) { pr_err("Unable to allocate memory for bio\n"); return NULL; } - bio_set_dev(bio, ib_dev->ibd_bd); bio->bi_private = cmd; bio->bi_end_io = &iblock_bio_done; bio->bi_iter.bi_sector = lba; - bio->bi_opf = opf; return bio; } @@ -418,10 +415,9 @@ iblock_execute_sync_cache(struct se_cmd *cmd) if (immed) target_complete_cmd(cmd, SAM_STAT_GOOD); - bio = bio_alloc(GFP_KERNEL, 0); + bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH, + GFP_KERNEL); bio->bi_end_io = iblock_end_io_flush; - bio_set_dev(bio, ib_dev->ibd_bd); - bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; if (!immed) bio->bi_private = cmd; submit_bio(bio); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 807d06ecadee..0fae71ac5cc8 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -17,7 +17,6 @@ #include <linux/blk_types.h> #include <linux/slab.h> #include <linux/spinlock.h> -#include <linux/genhd.h> #include <linux/cdrom.h> #include <linux/ratelimit.h> #include <linux/module.h> |
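The bulk of the driver hunks above are one mechanical conversion: the bio allocation and initialization helpers now take the target block device and the operation/flags at allocation time, which removes the separate bio_set_dev() and bio_set_op_attrs() calls. The following is not code from the patch, only a minimal sketch of the post-conversion calling convention; the names (example_*, done) are illustrative placeholders, not identifiers from any of the drivers touched here.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Write one page: device and operation are supplied when the bio is
 * allocated, so no bio_set_dev()/bio_set_op_attrs() pair is needed. */
static void example_write_page(struct block_device *bdev, struct bio_set *bs,
			       struct page *page, sector_t sector,
			       bio_end_io_t *done)
{
	struct bio *bio = bio_alloc_bioset(bdev, 1, REQ_OP_WRITE | REQ_SYNC,
					   GFP_NOIO, bs);

	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = done;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);
}

/* Cloning: bio_alloc_clone() replaces bio_clone_fast() + bio_set_dev();
 * the clone inherits bi_opf from the source bio. */
static struct bio *example_clone(struct block_device *bdev, struct bio *src,
				 struct bio_set *bs)
{
	return bio_alloc_clone(bdev, src, GFP_NOIO, bs);
}

Embedded and on-stack bios follow the same pattern through the extended bio_init(bio, bdev, bvec_table, max_vecs, opf) and bio_reset(bio, bdev, opf) signatures seen in the md, raid5-cache and nvmet hunks above.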
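The memstick (ms_block, mspro_block), sd and sr hunks share a second theme: per-device usage counters, kref bookkeeping and the sd_ref_mutex/sr_ref_mutex locks are dropped in favour of the block layer's new ->free_disk() callback, which runs once the last reference to the gendisk is put. A rough sketch of that pattern, using hypothetical names (struct foo_data, foo_free_disk) rather than any one driver's:

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical driver-private data hung off disk->private_data. */
struct foo_data {
	struct gendisk *disk;	/* back-pointer, plus whatever driver state */
};

/* Called by the block core when the final disk reference goes away,
 * so open()/release() no longer have to maintain a usage count. */
static void foo_free_disk(struct gendisk *disk)
{
	struct foo_data *foo = disk->private_data;

	kfree(foo);
}

static const struct block_device_operations foo_bdops = {
	.owner		= THIS_MODULE,
	.free_disk	= foo_free_disk,
};

On the teardown side the drivers now simply call put_disk() (see msb_remove(), mspro_block_remove(), sd_remove() and sr_remove() above); the final kfree() happens from ->free_disk() once outstanding opens have drained.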