author		Linus Torvalds <torvalds@linux-foundation.org>	2021-06-30 22:21:16 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-06-30 22:21:16 +0300
commit		440462198d9c45e48f2d8d9b18c5702d92282f46 (patch)
tree		9aab5db02f35d0cf9034108116b6a483147791ad /drivers/md/raid5.c
parent		df668a5fe461bb9d7e899c538acc7197746038f4 (diff)
parent		5ed9b357024dc43f75099f597187df05bcd5173c (diff)
download	linux-440462198d9c45e48f2d8d9b18c5702d92282f46.tar.xz
Merge tag 'for-5.14/drivers-2021-06-29' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
 "Pretty calm round, mostly just NVMe and a bit of MD:

  - NVMe updates (via Christoph)
      - improve the APST configuration algorithm (Alexey Bogoslavsky)
      - look for StorageD3Enable on companion ACPI device (Mario Limonciello)
      - allow selecting the network interface for TCP connections (Martin Belanger)
      - misc cleanups (Amit Engel, Chaitanya Kulkarni, Colin Ian King, Christoph)
      - move the ACPI StorageD3 code to drivers/acpi/ and add quirks for certain AMD CPUs (Mario Limonciello)
      - zoned device support for nvmet (Chaitanya Kulkarni)
      - fix the rules for changing the serial number in nvmet (Noam Gottlieb)
      - various small fixes and cleanups (Dan Carpenter, JK Kim, Chaitanya Kulkarni, Hannes Reinecke, Wesley Sheng, Geert Uytterhoeven, Daniel Wagner)

  - MD updates (via Song)
      - iostats rewrite (Guoqing Jiang)
      - raid5 lock contention optimization (Gal Ofri)

  - Fall through warning fix (Gustavo)

  - Misc fixes (Gustavo, Jiapeng)"

* tag 'for-5.14/drivers-2021-06-29' of git://git.kernel.dk/linux-block: (78 commits)
  nvmet: use NVMET_MAX_NAMESPACES to set nn value
  loop: Fix missing discard support when using LOOP_CONFIGURE
  nvme.h: add missing nvme_lba_range_type endianness annotations
  nvme: remove zeroout memset call for struct
  nvme-pci: remove zeroout memset call for struct
  nvmet: remove zeroout memset call for struct
  nvmet: add ZBD over ZNS backend support
  nvmet: add Command Set Identifier support
  nvmet: add nvmet_req_bio put helper for backends
  nvmet: add req cns error complete helper
  block: export blk_next_bio()
  nvmet: remove local variable
  nvmet: use nvme status value directly
  nvmet: use u32 type for the local variable nsid
  nvmet: use u32 for nvmet_subsys max_nsid
  nvmet: use req->cmd directly in file-ns fast path
  nvmet: use req->cmd directly in bdev-ns fast path
  nvmet: make ver stable once connection established
  nvmet: allow mn change if subsys not discovered
  nvmet: make sn stable once connection was established
  ...
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c	63
1 file changed, 45 insertions(+), 18 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7d4ff8a5c55e..b8436e4930ed 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5362,11 +5362,13 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf,
  */
 static void raid5_align_endio(struct bio *bi)
 {
-	struct bio* raid_bi = bi->bi_private;
+	struct md_io_acct *md_io_acct = bi->bi_private;
+	struct bio *raid_bi = md_io_acct->orig_bio;
 	struct mddev *mddev;
 	struct r5conf *conf;
 	struct md_rdev *rdev;
 	blk_status_t error = bi->bi_status;
+	unsigned long start_time = md_io_acct->start_time;
 
 	bio_put(bi);
 
@@ -5378,6 +5380,8 @@ static void raid5_align_endio(struct bio *bi)
 	rdev_dec_pending(rdev, conf->mddev);
 
 	if (!error) {
+		if (blk_queue_io_stat(raid_bi->bi_bdev->bd_disk->queue))
+			bio_end_io_acct(raid_bi, start_time);
 		bio_endio(raid_bi);
 		if (atomic_dec_and_test(&conf->active_aligned_reads))
 			wake_up(&conf->wait_for_quiescent);
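
bio_end_io_acct() above closes the accounting window that bio_start_io_acct() opens at submission time (the submit side appears in a later hunk in raid5_read_one_chunk()). A minimal sketch of that pairing in a hypothetical driver; the my_cmd/my_submit/my_complete names are illustration only and not part of this patch:

/* Pairing sketch (hypothetical driver code, not md code). */
struct my_cmd {
	struct bio	*bio;		/* bio as submitted by the upper layer */
	unsigned long	start;		/* timestamp from bio_start_io_acct() */
};

static void my_submit(struct my_cmd *cmd, struct bio *bio)
{
	cmd->bio = bio;
	/* only account when iostats are enabled on the disk's queue */
	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
		cmd->start = bio_start_io_acct(bio);
}

static void my_complete(struct my_cmd *cmd)
{
	if (blk_queue_io_stat(cmd->bio->bi_bdev->bd_disk->queue))
		bio_end_io_acct(cmd->bio, cmd->start);
	bio_endio(cmd->bio);
}

Both helpers operate on the original bio, so the array device rather than the member disk is what shows up in the iostats.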
@@ -5396,6 +5400,8 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 	struct md_rdev *rdev;
 	sector_t sector, end_sector, first_bad;
 	int bad_sectors, dd_idx;
+	struct md_io_acct *md_io_acct;
+	bool did_inc;
 
 	if (!in_chunk_boundary(mddev, raid_bio)) {
 		pr_debug("%s: non aligned\n", __func__);
@@ -5425,29 +5431,46 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 	atomic_inc(&rdev->nr_pending);
 	rcu_read_unlock();
 
-	align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
-	bio_set_dev(align_bio, rdev->bdev);
-	align_bio->bi_end_io = raid5_align_endio;
-	align_bio->bi_private = raid_bio;
-	align_bio->bi_iter.bi_sector = sector;
-
-	raid_bio->bi_next = (void *)rdev;
-
-	if (is_badblock(rdev, sector, bio_sectors(align_bio), &first_bad,
+	if (is_badblock(rdev, sector, bio_sectors(raid_bio), &first_bad,
 			&bad_sectors)) {
-		bio_put(align_bio);
+		bio_put(raid_bio);
 		rdev_dec_pending(rdev, mddev);
 		return 0;
 	}
 
+	align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->io_acct_set);
+	md_io_acct = container_of(align_bio, struct md_io_acct, bio_clone);
+	raid_bio->bi_next = (void *)rdev;
+	if (blk_queue_io_stat(raid_bio->bi_bdev->bd_disk->queue))
+		md_io_acct->start_time = bio_start_io_acct(raid_bio);
+	md_io_acct->orig_bio = raid_bio;
+
+	bio_set_dev(align_bio, rdev->bdev);
+	align_bio->bi_end_io = raid5_align_endio;
+	align_bio->bi_private = md_io_acct;
+	align_bio->bi_iter.bi_sector = sector;
+
 	/* No reshape active, so we can trust rdev->data_offset */
 	align_bio->bi_iter.bi_sector += rdev->data_offset;
 
-	spin_lock_irq(&conf->device_lock);
-	wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0,
-			    conf->device_lock);
-	atomic_inc(&conf->active_aligned_reads);
-	spin_unlock_irq(&conf->device_lock);
+	did_inc = false;
+	if (conf->quiesce == 0) {
+		atomic_inc(&conf->active_aligned_reads);
+		did_inc = true;
+	}
+	/* need a memory barrier to detect the race with raid5_quiesce() */
+	if (!did_inc || smp_load_acquire(&conf->quiesce) != 0) {
+		/* quiesce is in progress, so we need to undo io activation and wait
+		 * for it to finish
+		 */
+		if (did_inc && atomic_dec_and_test(&conf->active_aligned_reads))
+			wake_up(&conf->wait_for_quiescent);
+		spin_lock_irq(&conf->device_lock);
+		wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0,
+				    conf->device_lock);
+		atomic_inc(&conf->active_aligned_reads);
+		spin_unlock_irq(&conf->device_lock);
+	}
 
 	if (mddev->gendisk)
 		trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk),
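
The slow path kept in the branch above still serializes against raid5_quiesce() through wait_event_lock_irq(). Roughly, that macro behaves like the following loop (a simplified model of the include/linux/wait.h helper, eliding some of its bookkeeping); the caller must hold the lock, and the lock is dropped across the sleep so the quiescer can make progress:

	/* Simplified model of wait_event_lock_irq(wq, cond, lock); illustrative only. */
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
		if (cond)			/* re-tested with "lock" held */
			break;
		spin_unlock_irq(&lock);
		schedule();			/* sleep until wake_up(&wq) */
		spin_lock_irq(&lock);
	}
	finish_wait(&wq, &wait);

So on the slow path "conf->quiesce == 0" is always re-checked under device_lock before active_aligned_reads is incremented; only the fast path above skips the lock.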
@@ -5796,6 +5819,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 	last_sector = bio_end_sector(bi);
 	bi->bi_next = NULL;
 
+	md_account_bio(mddev, &bi);
 	prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
 	for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) {
 		int previous;
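
md_account_bio() is defined in md.c and is not part of this diff. Based on the fields the raid5 hunks use (orig_bio, start_time, bio_clone, mddev->io_acct_set), a plausible sketch of its behavior: clone the incoming bio, start accounting on the original, and swap the caller's pointer so the rest of raid5_make_request() works on the accounted clone.

/* Plausible sketch of md_account_bio(); the real definition is in md.c. */
void md_account_bio(struct mddev *mddev, struct bio **bio)
{
	struct md_io_acct *md_io_acct;
	struct bio *clone;

	if (!blk_queue_io_stat((*bio)->bi_bdev->bd_disk->queue))
		return;

	clone = bio_clone_fast(*bio, GFP_NOIO, &mddev->io_acct_set);
	md_io_acct = container_of(clone, struct md_io_acct, bio_clone);
	md_io_acct->orig_bio = *bio;
	md_io_acct->start_time = bio_start_io_acct(*bio);

	/* assumed endio that calls bio_end_io_acct() and then ends orig_bio */
	clone->bi_end_io = md_end_io_acct;
	*bio = clone;
}

Because the pointer is swapped in place, every later reference to "bi" in raid5_make_request() transparently targets the accounted clone.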
@@ -6928,7 +6952,7 @@ static struct attribute *raid5_attrs[] = {
 	&ppl_write_hint.attr,
 	NULL,
 };
-static struct attribute_group raid5_attrs_group = {
+static const struct attribute_group raid5_attrs_group = {
 	.name = NULL,
 	.attrs = raid5_attrs,
 };
@@ -8334,7 +8358,10 @@ static void raid5_quiesce(struct mddev *mddev, int quiesce)
 		 * active stripes can drain
 		 */
 		r5c_flush_cache(conf, INT_MAX);
-		conf->quiesce = 2;
+		/* need a memory barrier to make sure read_one_chunk() sees
+		 * quiesce started and reverts to slow (locked) path.
+		 */
+		smp_store_release(&conf->quiesce, 2);
 		wait_event_cmd(conf->wait_for_quiescent,
 				    atomic_read(&conf->active_stripes) == 0 &&
 				    atomic_read(&conf->active_aligned_reads) == 0,
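
Taken together, the smp_store_release() here and the smp_load_acquire() in read_one_chunk() form the handshake that lets aligned reads skip device_lock. Below is a standalone C11 analogue of that handshake (a hypothetical demo, not kernel code; mapping smp_load_acquire to memory_order_acquire and smp_store_release to memory_order_release is an assumption of the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int quiesce;		/* conf->quiesce analogue */
static atomic_int active_reads;		/* conf->active_aligned_reads analogue */

/* Reader fast path: optimistic increment, then acquire re-check. */
static bool try_fast_read(void)
{
	bool did_inc = false;

	if (atomic_load_explicit(&quiesce, memory_order_relaxed) == 0) {
		atomic_fetch_add(&active_reads, 1);
		did_inc = true;
	}
	/* acquire pairs with the release store in start_quiesce() */
	if (!did_inc ||
	    atomic_load_explicit(&quiesce, memory_order_acquire) != 0) {
		if (did_inc)
			atomic_fetch_sub(&active_reads, 1);	/* undo */
		return false;	/* caller falls back to the locked slow path */
	}
	return true;		/* read proceeds without taking the lock */
}

/* Quiescer: publish quiesce, then wait for in-flight readers to drain. */
static void start_quiesce(void)
{
	atomic_store_explicit(&quiesce, 2, memory_order_release);
	while (atomic_load(&active_reads) != 0)
		;		/* the kernel sleeps on wait_for_quiescent instead */
}

int main(void)
{
	if (try_fast_read())
		atomic_fetch_sub(&active_reads, 1);	/* read completes */
	start_quiesce();
	puts("quiesced");
	return 0;
}

The intent mirrors the patch: either the reader's acquire load observes quiesce != 0 and backs out, or the quiescer sees the incremented counter and waits for it to drain before declaring the array quiesced.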