author      Linus Torvalds <torvalds@linux-foundation.org>   2022-05-23 23:56:39 +0300
committer   Linus Torvalds <torvalds@linux-foundation.org>   2022-05-23 23:56:39 +0300
commit      115cd47132d71bd7e4aa1093e15d861a59e73a94 (patch)
tree        42e457126de728c9328e4b9b09b5ca4852a590de /drivers/md/dm-thin.c
parent      f6792c877a1cacc3b3eea7cb5b45857b3c484c51 (diff)
parent      2aaf516084184e4e6f80da01b2b3ed882fd20a79 (diff)
download    linux-115cd47132d71bd7e4aa1093e15d861a59e73a94.tar.xz
Merge tag 'for-5.19/block-2022-05-22' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
 "Here are the core block changes for 5.19. This contains:

   - blk-throttle accounting fix (Laibin)

   - Series removing redundant assignments (Michal)

   - Expose bio cache via the bio_set, so that DM can use it (Mike)

   - Finish off the bio allocation interface cleanups by dealing with
     the weirdest member of the family. bio_kmalloc combines a kmalloc
     for the bio and bio_vecs with a hidden bio_init call and magic
     cleanup semantics (Christoph)

   - Clean up the block layer API so that APIs consumed by file systems
     are (almost) only struct block_device based, so that file systems
     don't have to poke into block layer internals like the
     request_queue (Christoph)

   - Clean up the blk_execute_rq* API (Christoph)

   - Clean up various loose ends in the blk-cgroup code to make it
     easier to follow in preparation of reworking the blkcg assignment
     for bios (Christoph)

   - Fix use-after-free issues in BFQ when processes with merged queues
     get moved to different cgroups (Jan)

   - BFQ fixes (Jan)

   - Various fixes and cleanups (Bart, Chengming, Fanjun, Julia, Ming,
     Wolfgang, me)"

* tag 'for-5.19/block-2022-05-22' of git://git.kernel.dk/linux-block: (83 commits)
  blk-mq: fix typo in comment
  bfq: Remove bfq_requeue_request_body()
  bfq: Remove superfluous conversion from RQ_BIC()
  bfq: Allow current waker to defend against a tentative one
  bfq: Relax waker detection for shared queues
  blk-cgroup: delete rcu_read_lock_held() WARN_ON_ONCE()
  blk-throttle: Set BIO_THROTTLED when bio has been throttled
  blk-cgroup: Remove unnecessary rcu_read_lock/unlock()
  blk-cgroup: always terminate io.stat lines
  block, bfq: make bfq_has_work() more accurate
  block, bfq: protect 'bfqd->queued' by 'bfqd->lock'
  block: cleanup the VM accounting in submit_bio
  block: Fix the bio.bi_opf comment
  block: reorder the REQ_ flags
  blk-iocost: combine local_stat and desc_stat to stat
  block: improve the error message from bio_check_eod
  block: allow passing a NULL bdev to bio_alloc_clone/bio_init_clone
  block: remove superfluous calls to blkcg_bio_issue_init
  kthread: unexport kthread_blkcg
  blk-cgroup: cleanup blkcg_maybe_throttle_current
  ...
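A recurring theme in the series above is that filesystem-facing helpers now take a struct block_device rather than a request_queue. The sketch below only illustrates that direction and is not code from this merge; the dm_example_report_limits() name is made up, while the bdev_* helpers are existing block layer accessors:

    #include <linux/blkdev.h>

    /* Hypothetical helper: query device properties via the block_device
     * itself instead of reaching into bdev_get_queue() internals. */
    static void dm_example_report_limits(struct block_device *bdev)
    {
    	sector_t capacity = bdev_nr_sectors(bdev);		/* size in 512-byte sectors */
    	unsigned int lbs = bdev_logical_block_size(bdev);	/* logical block size in bytes */
    	unsigned int max_discard = bdev_max_discard_sectors(bdev); /* 0 means no discard support */

    	pr_info("bdev %pg: %llu sectors, %u byte blocks, discard %s\n",
    		bdev, (unsigned long long)capacity, lbs,
    		max_discard ? "supported" : "unsupported");
    }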
Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--   drivers/md/dm-thin.c   15
1 file changed, 3 insertions(+), 12 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 4d25d0e27031..84c083f76673 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -398,8 +398,8 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da
sector_t s = block_to_sectors(tc->pool, data_b);
sector_t len = block_to_sectors(tc->pool, data_e - data_b);
- return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
- GFP_NOWAIT, 0, &op->bio);
+ return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOWAIT,
+ &op->bio);
}
static void end_discard(struct discard_op *op, int r)
@@ -2802,13 +2802,6 @@ static void requeue_bios(struct pool *pool)
/*----------------------------------------------------------------
* Binding of control targets to a pool object
*--------------------------------------------------------------*/
-static bool data_dev_supports_discard(struct pool_c *pt)
-{
- struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-
- return blk_queue_discard(q);
-}
-
static bool is_factor(sector_t block_size, uint32_t n)
{
return !sector_div(block_size, n);
@@ -2828,7 +2821,7 @@ static void disable_passdown_if_not_supported(struct pool_c *pt)
if (!pt->adjusted_pf.discard_passdown)
return;
- if (!data_dev_supports_discard(pt))
+ if (!bdev_max_discard_sectors(pt->data_dev->bdev))
reason = "discard unsupported";
else if (data_limits->max_discard_sectors < pool->sectors_per_block)
@@ -4057,8 +4050,6 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
/*
* Must explicitly disallow stacking discard limits otherwise the
* block layer will stack them if pool's data device has support.
- * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
- * user to see that, so make sure to set all discard limits to 0.
*/
limits->discard_granularity = 0;
return;
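
For reference, the two API changes visible in this diff are the removal of the flags argument from __blkdev_issue_discard() and the replacement of the blk_queue_discard() check with bdev_max_discard_sectors(). The sketch below shows the resulting idiom under those assumptions; example_discard_range() is a hypothetical caller, not code from this merge:

    #include <linux/blkdev.h>

    /* Post-5.19 discard idiom: QUEUE_FLAG_DISCARD is gone, so a non-zero
     * bdev_max_discard_sectors() now means the device supports discard. */
    static int example_discard_range(struct block_device *bdev,
    				 sector_t start, sector_t nr_sects)
    {
    	struct bio *bio = NULL;
    	int ret;

    	if (!bdev_max_discard_sectors(bdev))
    		return -EOPNOTSUPP;	/* device does not support discard */

    	/* The flags argument was dropped from __blkdev_issue_discard(). */
    	ret = __blkdev_issue_discard(bdev, start, nr_sects, GFP_NOWAIT, &bio);
    	if (!ret && bio) {
    		ret = submit_bio_wait(bio);	/* submit the chained bios and wait */
    		bio_put(bio);
    	}
    	return ret;
    }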