author     Linus Torvalds <torvalds@linux-foundation.org>  2015-09-03 02:35:26 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-03 02:35:26 +0300
commit     1e1a4e8f439113b7820bc7150569f685e1cc2b43 (patch)
tree       25a201240e619f7d766638fee57d8cfc55ec0bb9 /drivers/md/dm-thin.c
parent     d975f309a8b250e67b66eabeb56be6989c783629 (diff)
parent     cc7da0ba9c96699592d0a69d7d146ac6adcc18e7 (diff)
download   linux-1e1a4e8f439113b7820bc7150569f685e1cc2b43.tar.xz
Merge tag 'dm-4.3-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper update from Mike Snitzer:
- a couple of small cleanups in dm-cache, dm-verity, persistent-data's
  dm-btree, and DM core
- a 4.1-stable fix for dm-cache that fixes the leaking of deferred bio
  prison cells
- a 4.2-stable fix that adds feature reporting for the dm-stats
  features added in 4.2
- improve DM-snapshot to not invalidate the on-disk snapshot if a
  write overflow occurs on the snapshot device; a write overflow
  triggered through the origin device will still invalidate the
  snapshot
- optimize DM-thinp's async discard submission a bit now that late bio
  splitting has been included in block core
- switch DM-cache's SMQ policy lock from a mutex to a spinlock;
  improves performance on very low latency devices (e.g. NVMe SSD).
  A sketch of this conversion pattern follows the list below
- document DM RAID 4/5/6's discard support
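
The SMQ lock-type switch follows the standard kernel conversion pattern.
A minimal sketch of that pattern is below; the struct and function names
here are illustrative only, not the actual dm-cache-policy-smq code:

	/*
	 * Illustrative sketch: converting a sleeping mutex to a spinlock
	 * for short, non-sleeping critical sections.  On very low latency
	 * devices the scheduler round-trip of a contended mutex can cost
	 * more than the work done under the lock.
	 */
	#include <linux/spinlock.h>

	struct smq_example {
		/* before: struct mutex lock, taken with mutex_lock() */
		spinlock_t lock;	/* after: never sleeps while held */
		unsigned nr_hits;	/* example state guarded by the lock */
	};

	static void smq_example_record_hit(struct smq_example *p)
	{
		unsigned long flags;

		spin_lock_irqsave(&p->lock, flags);
		p->nr_hits++;		/* critical section must stay short */
		spin_unlock_irqrestore(&p->lock, flags);
	}

The constraint this buys is that nothing under the lock may sleep, which
is why such a change is only safe once the critical sections are short
and allocation-free.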
[ I did not pull the slab changes, which weren't appropriate for this
  tree and weren't obviously the right thing to do anyway. At the very
  least they need some discussion and explanation before getting merged.

  Because this is a partial pull rather than a pull of the actual
  tagged commit, this merge commit is also obviously missing the git
  signature from the original tag ]
* tag 'dm-4.3-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm cache: fix use after freeing migrations
dm cache: small cleanups related to deferred prison cell cleanup
dm cache: fix leaking of deferred bio prison cells
dm raid: document RAID 4/5/6 discard support
dm stats: report precise_timestamps and histogram in @stats_list output
dm thin: optimize async discard submission
dm snapshot: don't invalidate on-disk image on snapshot write overflow
dm: remove unlikely() before IS_ERR()
dm: do not override error code returned from dm_get_device()
dm: test return value for DM_MAPIO_SUBMITTED
dm verity: remove unused mempool
dm cache: move wake_waker() from free_migrations() to where it is needed
dm btree remove: remove unused function get_nr_entries()
dm btree: remove unused "dm_block_t root" parameter in btree_split_sibling()
dm cache policy smq: change the mutex to a spinlock
Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--  drivers/md/dm-thin.c | 89
1 file changed, 15 insertions, 74 deletions
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 271a66249363..6578b7bc1fbb 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -332,9 +332,6 @@ struct thin_c {
  *
  * Description:
  *    Asynchronously issue a discard request for the sectors in question.
- *    NOTE: this variant of blk-core's blkdev_issue_discard() is a stop-gap
- *    that is being kept local to DM thinp until the block changes to allow
- *    late bio splitting land upstream.
  */
 static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
 					sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
@@ -342,91 +339,36 @@ static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sect
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
-	unsigned int max_discard_sectors, granularity;
-	int alignment;
 	struct bio *bio;
-	int ret = 0;
-	struct blk_plug plug;
 
-	if (!q)
+	if (!q || !nr_sects)
 		return -ENXIO;
 
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
 
-	/* Zero-sector (unknown) and one-sector granularities are the same. */
-	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
-	/*
-	 * Ensure that max_discard_sectors is of the proper
-	 * granularity, so that requests stay aligned after a split.
-	 */
-	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-	max_discard_sectors -= max_discard_sectors % granularity;
-	if (unlikely(!max_discard_sectors)) {
-		/* Avoid infinite loop below. Being cautious never hurts. */
-		return -EOPNOTSUPP;
-	}
-
 	if (flags & BLKDEV_DISCARD_SECURE) {
 		if (!blk_queue_secdiscard(q))
 			return -EOPNOTSUPP;
 		type |= REQ_SECURE;
 	}
 
-	blk_start_plug(&plug);
-	while (nr_sects) {
-		unsigned int req_sects;
-		sector_t end_sect, tmp;
-
-		/*
-		 * Required bio_put occurs in bio_endio thanks to bio_chain below
-		 */
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
-		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
-
-		/*
-		 * If splitting a request, and the next starting sector would be
-		 * misaligned, stop the discard at the previous aligned sector.
-		 */
-		end_sect = sector + req_sects;
-		tmp = end_sect;
-		if (req_sects < nr_sects &&
-		    sector_div(tmp, granularity) != alignment) {
-			end_sect = end_sect - alignment;
-			sector_div(end_sect, granularity);
-			end_sect = end_sect * granularity + alignment;
-			req_sects = end_sect - sector;
-		}
-
-		bio_chain(bio, parent_bio);
-
-		bio->bi_iter.bi_sector = sector;
-		bio->bi_bdev = bdev;
+	/*
+	 * Required bio_put occurs in bio_endio thanks to bio_chain below
+	 */
+	bio = bio_alloc(gfp_mask, 1);
+	if (!bio)
+		return -ENOMEM;
 
-		bio->bi_iter.bi_size = req_sects << 9;
-		nr_sects -= req_sects;
-		sector = end_sect;
+	bio_chain(bio, parent_bio);
 
-		submit_bio(type, bio);
+	bio->bi_iter.bi_sector = sector;
+	bio->bi_bdev = bdev;
+	bio->bi_iter.bi_size = nr_sects << 9;
 
-		/*
-		 * We can loop for a long time in here, if someone does
-		 * full device discards (like mkfs). Be nice and allow
-		 * us to schedule out to avoid softlocking if preempt
-		 * is disabled.
-		 */
-		cond_resched();
-	}
-	blk_finish_plug(&plug);
+	submit_bio(type, bio);
 
-	return ret;
+	return 0;
 }
 
 static bool block_size_is_power_of_two(struct pool *pool)
@@ -1543,9 +1485,8 @@ static void process_discard_cell_no_passdown(struct thin_c *tc,
 }
 
 /*
- * FIXME: DM local hack to defer parent bios's end_io until we
- * _know_ all chained sub range discard bios have completed.
- * Will go away once late bio splitting lands upstream!
+ * __bio_inc_remaining() is used to defer parent bios's end_io until
+ * we _know_ all chained sub range discard bios have completed.
  */
 static inline void __bio_inc_remaining(struct bio *bio)
 {
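
For context on the comment retained at the end of the diff: bio_chain()
makes a child bio's completion feed into its parent's, and the helper
bumps the parent's remaining-completion count so the parent's end_io
only runs once every chained child has completed. A sketch of the idea
is below, assuming the 4.3-era struct bio fields __bi_remaining and
BIO_CHAIN; the in-tree body of __bio_inc_remaining() may differ in
detail:

	static inline void __bio_inc_remaining(struct bio *bio)
	{
		/* mark the bio as having shared (chained) completion */
		bio->bi_flags |= (1 << BIO_CHAIN);
		smp_mb__before_atomic();	/* flag visible before the inc */
		/* one more completion must arrive before end_io fires */
		atomic_inc(&bio->__bi_remaining);
	}

Each increment pairs with a bio_endio() on a chained child, which drops
__bi_remaining and only invokes the parent's end_io once it reaches zero.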