author    Mike Snitzer <snitzer@redhat.com>  2022-02-22 21:28:12 +0300
committer Mike Snitzer <snitzer@redhat.com>  2022-02-22 21:55:52 +0300
commit    fa247089de9936a46e290d4724cb5f0b845600f5 (patch)
tree      693bc8584c0d58c8346c741cedf0c30a98091989 /drivers/md
parent    a6a4901a5ef35070297c769120fea04145df0aaa (diff)
download  linux-fa247089de9936a46e290d4724cb5f0b845600f5.tar.xz
dm: requeue IO if mapping table not yet available
Update both bio-based and request-based DM to requeue IO if the mapping table is not yet available.

The race of IO being submitted before the DM device is ready is narrow, yet possible for the initial table load given that the DM device's request_queue is created beforehand, so it is best to requeue such IO to handle this unlikely case.

Reported-by: Zhang Yi <yi.zhang@huawei.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
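For illustration only (not part of the commit): below is a minimal, self-contained C model of the bio-based decision after this change, showing how "device suspended" and "no table loaded yet" now funnel into the same requeue-or-fail-fast choice. All names in it (dispatch_action, io_flags, decide_dispatch) are hypothetical stand-ins, not DM APIs; only the decision order mirrors the patch.

/* Standalone model of the dm_submit_bio() decision after this patch.
 * Names here are hypothetical; only the branch structure follows the change. */
#include <stdbool.h>
#include <stdio.h>

enum dispatch_action {
	DISPATCH_MAP_IO,	/* normal path: hand the IO to the live table */
	DISPATCH_WOULDBLOCK,	/* REQ_NOWAIT callers get a would-block error */
	DISPATCH_ERROR,		/* readahead is simply failed, it is best-effort */
	DISPATCH_DEFER,		/* queue the IO and retry once the device is ready */
};

struct io_flags {
	bool nowait;		/* models bio->bi_opf & REQ_NOWAIT */
	bool rahead;		/* models bio->bi_opf & REQ_RAHEAD */
};

static enum dispatch_action decide_dispatch(bool suspended, bool have_map,
					    struct io_flags f)
{
	/* After the patch, a missing table is treated like suspension:
	 * the IO is deferred instead of being errored. */
	if (suspended || !have_map) {
		if (f.nowait)
			return DISPATCH_WOULDBLOCK;
		if (f.rahead)
			return DISPATCH_ERROR;
		return DISPATCH_DEFER;
	}
	return DISPATCH_MAP_IO;
}

int main(void)
{
	struct io_flags plain = { .nowait = false, .rahead = false };

	/* IO submitted before the first table load is now deferred, not failed. */
	printf("no table yet: %d (expect DISPATCH_DEFER=3)\n",
	       decide_dispatch(false, false, plain));
	/* Once a table is live and the device is not suspended, IO is mapped. */
	printf("table loaded: %d (expect DISPATCH_MAP_IO=0)\n",
	       decide_dispatch(false, true, plain));
	return 0;
}

In the kernel, the deferred case corresponds broadly to queueing the bio on the device's deferred list so it is resubmitted once the device becomes ready.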
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-rq.c  7
-rw-r--r--  drivers/md/dm.c     11
2 files changed, 9 insertions, 9 deletions
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6948d5db9092..3dd040a56318 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -491,8 +491,13 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (unlikely(!ti)) {
 		int srcu_idx;
-		struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+		struct dm_table *map;
 
+		map = dm_get_live_table(md, &srcu_idx);
+		if (unlikely(!map)) {
+			dm_put_live_table(md, srcu_idx);
+			return BLK_STS_RESOURCE;
+		}
 		ti = dm_table_find_target(map, 0);
 		dm_put_live_table(md, srcu_idx);
 	}
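A note on the request-based hunk above: returning BLK_STS_RESOURCE from a blk-mq ->queue_rq handler tells the block layer the request could not be serviced right now and should be requeued and dispatched again later. The sketch below is a stand-alone model of that retry contract only; fake_queue_rq and dispatch_with_retry are hypothetical and not part of the block layer.

/* Model of "BLK_STS_RESOURCE means requeue and retry later". */
#include <stdbool.h>
#include <stdio.h>

enum queue_status {
	STS_OK,		/* request accepted and dispatched */
	STS_RESOURCE,	/* temporarily unable to service: requeue and retry */
};

/* Models dm_mq_queue_rq() after the patch: with no live table, report
 * a resource condition instead of touching a NULL table pointer. */
static enum queue_status fake_queue_rq(bool have_map)
{
	return have_map ? STS_OK : STS_RESOURCE;
}

/* Models the block layer: a resource status puts the request back on the
 * queue; here the table "appears" before the next dispatch attempt. */
static void dispatch_with_retry(void)
{
	bool have_map = false;
	int attempt;

	for (attempt = 1; attempt <= 3; attempt++) {
		if (fake_queue_rq(have_map) == STS_OK) {
			printf("attempt %d: dispatched\n", attempt);
			return;
		}
		printf("attempt %d: requeued, will retry\n", attempt);
		have_map = true;	/* table load completes in the meantime */
	}
}

int main(void)
{
	dispatch_with_retry();
	return 0;
}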
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ba1068f4cb52..3640ef4bf327 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1533,15 +1533,10 @@ static void dm_submit_bio(struct bio *bio)
 	struct dm_table *map;
 
 	map = dm_get_live_table(md, &srcu_idx);
-	if (unlikely(!map)) {
-		DMERR_LIMIT("%s: mapping table unavailable, erroring io",
-			    dm_device_name(md));
-		bio_io_error(bio);
-		goto out;
-	}
 
-	/* If suspended, queue this IO for later */
-	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
+	/* If suspended, or map not yet available, queue this IO for later */
+	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
+	    unlikely(!map)) {
 		if (bio->bi_opf & REQ_NOWAIT)
 			bio_wouldblock_error(bio);
 		else if (bio->bi_opf & REQ_RAHEAD)