Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/Kconfig          |  2
-rw-r--r--  drivers/md/dm-bio-list.h    |  3
-rw-r--r--  drivers/md/dm-mpath-rdac.c  |  2
-rw-r--r--  drivers/md/raid5.c          | 34
4 files changed, 21 insertions, 20 deletions
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 531d4d17d011..34a8c60a254a 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -263,7 +263,7 @@ config DM_MULTIPATH_EMC
 
 config DM_MULTIPATH_RDAC
 	tristate "LSI/Engenio RDAC multipath support (EXPERIMENTAL)"
-	depends on DM_MULTIPATH && BLK_DEV_DM && EXPERIMENTAL
+	depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
 	---help---
 	  Multipath support for LSI/Engenio RDAC.
 
diff --git a/drivers/md/dm-bio-list.h b/drivers/md/dm-bio-list.h
index 16ee3b018b3a..3f7b827649e3 100644
--- a/drivers/md/dm-bio-list.h
+++ b/drivers/md/dm-bio-list.h
@@ -9,6 +9,8 @@
 
 #include <linux/bio.h>
 
+#ifdef CONFIG_BLOCK
+
 struct bio_list {
 	struct bio *head;
 	struct bio *tail;
@@ -106,4 +108,5 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
 	return bio;
 }
 
+#endif /* CONFIG_BLOCK */
 #endif
diff --git a/drivers/md/dm-mpath-rdac.c b/drivers/md/dm-mpath-rdac.c
index 8b776b8cb7f7..16b161345775 100644
--- a/drivers/md/dm-mpath-rdac.c
+++ b/drivers/md/dm-mpath-rdac.c
@@ -292,7 +292,7 @@ static struct request *get_rdac_req(struct rdac_handler *h,
 	rq->end_io_data = h;
 	rq->timeout = h->timeout;
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	rq->cmd_flags = REQ_FAILFAST | REQ_NOMERGE;
+	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
 	return rq;
 }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2aff4be35dc4..f96dea975fa5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -514,7 +514,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 	struct stripe_head *sh = stripe_head_ref;
 	struct bio *return_bi = NULL;
 	raid5_conf_t *conf = sh->raid_conf;
-	int i, more_to_read = 0;
+	int i;
 
 	pr_debug("%s: stripe %llu\n", __FUNCTION__,
 		(unsigned long long)sh->sector);
@@ -522,16 +522,14 @@ static void ops_complete_biofill(void *stripe_head_ref)
 	/* clear completed biofills */
 	for (i = sh->disks; i--; ) {
 		struct r5dev *dev = &sh->dev[i];
-		/* check if this stripe has new incoming reads */
-		if (dev->toread)
-			more_to_read++;
 
 		/* acknowledge completion of a biofill operation */
-		/* and check if we need to reply to a read request
-		 */
-		if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) {
+		/* and check if we need to reply to a read request,
+		 * new R5_Wantfill requests are held off until
+		 * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
+		 */
+		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
 			struct bio *rbi, *rbi2;
-			clear_bit(R5_Wantfill, &dev->flags);
 
 			/* The access to dev->read is outside of the
 			 * spin_lock_irq(&conf->device_lock), but is protected
@@ -558,8 +556,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 
 	return_io(return_bi);
 
-	if (more_to_read)
-		set_bit(STRIPE_HANDLE, &sh->state);
+	set_bit(STRIPE_HANDLE, &sh->state);
 	release_stripe(sh);
 }
 
@@ -2541,7 +2538,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 	struct dma_async_tx_descriptor *tx = NULL;
 	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
 	for (i = 0; i < sh->disks; i++)
-		if (i != sh->pd_idx && (r6s && i != r6s->qd_idx)) {
+		if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
 			int dd_idx, pd_idx, j;
 			struct stripe_head *sh2;
 
@@ -2574,7 +2571,8 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
 			for (j = 0; j < conf->raid_disks; j++)
 				if (j != sh2->pd_idx &&
-				    (r6s && j != r6s->qd_idx) &&
+				    (!r6s || j != raid6_next_disk(sh2->pd_idx,
+								 sh2->disks)) &&
 				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
 					break;
 			if (j == conf->raid_disks) {
@@ -2583,12 +2581,12 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 			}
 			release_stripe(sh2);
 
-			/* done submitting copies, wait for them to complete */
-			if (i + 1 >= sh->disks) {
-				async_tx_ack(tx);
-				dma_wait_for_async_tx(tx);
-			}
 		}
+	/* done submitting copies, wait for them to complete */
+	if (tx) {
+		async_tx_ack(tx);
+		dma_wait_for_async_tx(tx);
+	}
 }
 
 /*
@@ -2855,7 +2853,7 @@ static void handle_stripe5(struct stripe_head *sh)
 		sh->disks = conf->raid_disks;
 		sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
 			conf->raid_disks);
-		s.locked += handle_write_operations5(sh, 0, 1);
+		s.locked += handle_write_operations5(sh, 1, 1);
 	} else if (s.expanded &&
 		!test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
 		clear_bit(STRIPE_EXPAND_READY, &sh->state);
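
Note on the dm-mpath-rdac hunk: get_rdac_req() previously overwrote rq->cmd_flags with a plain assignment, which discards any flag bits already set on the request before this point; the fix ORs the new bits in instead. A minimal userspace sketch of that difference, assuming nothing about the kernel itself (the flag names and values below are illustrative, not the real REQ_* definitions):

	#include <stdio.h>

	/* Illustrative flag bits only, not the kernel's REQ_* values */
	#define FLAG_PREALLOCATED (1u << 0)	/* pretend this was set when the request was set up */
	#define FLAG_FAILFAST     (1u << 1)
	#define FLAG_NOMERGE      (1u << 2)

	int main(void)
	{
		unsigned int flags = FLAG_PREALLOCATED;

		/* plain assignment (old code): loses FLAG_PREALLOCATED */
		unsigned int clobbered = FLAG_FAILFAST | FLAG_NOMERGE;

		/* OR-in (fixed code): keeps whatever was already set */
		flags |= FLAG_FAILFAST | FLAG_NOMERGE;

		printf("assignment: %#x, or-in: %#x\n", clobbered, flags);
		return 0;
	}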