Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-crypt.c   | 50
-rw-r--r--  drivers/md/dm-emc.c     | 17
-rw-r--r--  drivers/md/dm-io.c      |  8
-rw-r--r--  drivers/md/dm-mpath.c   |  4
-rw-r--r--  drivers/md/dm-raid1.c   |  4
-rw-r--r--  drivers/md/dm-snap.c    |  2
-rw-r--r--  drivers/md/dm-table.c   | 28
-rw-r--r--  drivers/md/dm-zero.c    |  2
-rw-r--r--  drivers/md/dm.c         | 34
-rw-r--r--  drivers/md/dm.h         |  1
-rw-r--r--  drivers/md/faulty.c     | 10
-rw-r--r--  drivers/md/linear.c     | 24
-rw-r--r--  drivers/md/md.c         | 82
-rw-r--r--  drivers/md/multipath.c  | 43
-rw-r--r--  drivers/md/raid0.c      | 35
-rw-r--r--  drivers/md/raid1.c      | 64
-rw-r--r--  drivers/md/raid10.c     | 62
-rw-r--r--  drivers/md/raid5.c      | 79
18 files changed, 128 insertions(+), 421 deletions(-)
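
The diff below is the md/dm side of the block-layer bi_end_io conversion: completion callbacks lose the bytes_done argument and the int return value because they are now called exactly once, when the bio has fully completed, so the "if (bio->bi_size) return 1;" partial-completion checks disappear. Callers of bio_endio()/bio_io_error() drop the byte-count argument to match, and the obsolete issue_flush_fn hooks (dm_table_flush_all and the per-personality *_issue_flush helpers) are removed outright. A minimal sketch of the conversion pattern applied throughout (example_endio and master_bio are placeholder names, not identifiers from the patch):

	/* old prototype: may be called for partial completions */
	static int example_endio(struct bio *bio, unsigned int bytes_done, int error)
	{
		if (bio->bi_size)
			return 1;	/* not finished yet, keep going */
		bio_endio(master_bio, master_bio->bi_size, error);
		return 0;
	}

	/* new prototype: called once, only when the bio is complete */
	static void example_endio(struct bio *bio, int error)
	{
		bio_endio(master_bio, error);
	}
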
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index bdc52d6922b7..64fee90bb68b 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -441,33 +441,12 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
return clone;
}
-static void crypt_free_buffer_pages(struct crypt_config *cc,
- struct bio *clone, unsigned int bytes)
+static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
- unsigned int i, start, end;
+ unsigned int i;
struct bio_vec *bv;
- /*
- * This is ugly, but Jens Axboe thinks that using bi_idx in the
- * endio function is too dangerous at the moment, so I calculate the
- * correct position using bi_vcnt and bi_size.
- * The bv_offset and bv_len fields might already be modified but we
- * know that we always allocated whole pages.
- * A fix to the bi_idx issue in the kernel is in the works, so
- * we will hopefully be able to revert to the cleaner solution soon.
- */
- i = clone->bi_vcnt - 1;
- bv = bio_iovec_idx(clone, i);
- end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - clone->bi_size;
- start = end - bytes;
-
- start >>= PAGE_SHIFT;
- if (!clone->bi_size)
- end = clone->bi_vcnt;
- else
- end >>= PAGE_SHIFT;
-
- for (i = start; i < end; i++) {
+ for (i = 0; i < clone->bi_vcnt; i++) {
bv = bio_iovec_idx(clone, i);
BUG_ON(!bv->bv_page);
mempool_free(bv->bv_page, cc->page_pool);
@@ -489,7 +468,7 @@ static void dec_pending(struct dm_crypt_io *io, int error)
if (!atomic_dec_and_test(&io->pending))
return;
- bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
+ bio_endio(io->base_bio, io->error);
mempool_free(io, cc->io_pool);
}
@@ -509,25 +488,19 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
queue_work(_kcryptd_workqueue, &io->work);
}
-static int crypt_endio(struct bio *clone, unsigned int done, int error)
+static void crypt_endio(struct bio *clone, int error)
{
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->target->private;
unsigned read_io = bio_data_dir(clone) == READ;
/*
- * free the processed pages, even if
- * it's only a partially completed write
+ * free the processed pages
*/
- if (!read_io)
- crypt_free_buffer_pages(cc, clone, done);
-
- /* keep going - not finished yet */
- if (unlikely(clone->bi_size))
- return 1;
-
- if (!read_io)
+ if (!read_io) {
+ crypt_free_buffer_pages(cc, clone);
goto out;
+ }
if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
error = -EIO;
@@ -537,12 +510,11 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error)
bio_put(clone);
io->post_process = 1;
kcryptd_queue_io(io);
- return 0;
+ return;
out:
bio_put(clone);
dec_pending(io, error);
- return error;
}
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
@@ -615,7 +587,7 @@ static void process_write(struct dm_crypt_io *io)
ctx.idx_out = 0;
if (unlikely(crypt_convert(cc, &ctx) < 0)) {
- crypt_free_buffer_pages(cc, clone, clone->bi_size);
+ crypt_free_buffer_pages(cc, clone);
bio_put(clone);
dec_pending(io, -EIO);
return;
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index 265c467854da..342517261ece 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -38,13 +38,10 @@ static inline void free_bio(struct bio *bio)
bio_put(bio);
}
-static int emc_endio(struct bio *bio, unsigned int bytes_done, int error)
+static void emc_endio(struct bio *bio, int error)
{
struct dm_path *path = bio->bi_private;
- if (bio->bi_size)
- return 1;
-
/* We also need to look at the sense keys here whether or not to
* switch to the next PG etc.
*
@@ -57,8 +54,6 @@ static int emc_endio(struct bio *bio, unsigned int bytes_done, int error)
/* request is freed in block layer */
free_bio(bio);
-
- return 0;
}
static struct bio *get_failover_bio(struct dm_path *path, unsigned data_size)
@@ -109,15 +104,7 @@ static struct request *get_failover_req(struct emc_handler *h,
return NULL;
}
- rq->bio = rq->biotail = bio;
- blk_rq_bio_prep(q, rq, bio);
-
- rq->rq_disk = bdev->bd_contains->bd_disk;
-
- /* bio backed don't set data */
- rq->buffer = rq->data = NULL;
- /* rq data_len used for pc cmd's request_bufflen */
- rq->data_len = bio->bi_size;
+ blk_rq_append_bio(q, rq, bio);
rq->sense = h->sense;
memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index f3a772486437..b8e342fe7586 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -124,15 +124,11 @@ static void dec_count(struct io *io, unsigned int region, int error)
}
}
-static int endio(struct bio *bio, unsigned int done, int error)
+static void endio(struct bio *bio, int error)
{
struct io *io;
unsigned region;
- /* keep going until we've finished */
- if (bio->bi_size)
- return 1;
-
if (error && bio_data_dir(bio) == READ)
zero_fill_bio(bio);
@@ -146,8 +142,6 @@ static int endio(struct bio *bio, unsigned int done, int error)
bio_put(bio);
dec_count(io, region, error);
-
- return 0;
}
/*-----------------------------------------------------------------
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d6ca9d0a6fd1..31056abca89d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -390,11 +390,11 @@ static void dispatch_queued_ios(struct multipath *m)
r = map_io(m, bio, mpio, 1);
if (r < 0)
- bio_endio(bio, bio->bi_size, r);
+ bio_endio(bio, r);
else if (r == DM_MAPIO_REMAPPED)
generic_make_request(bio);
else if (r == DM_MAPIO_REQUEUE)
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
bio = next;
}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 144071e70a93..d09ff15490a5 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -820,7 +820,7 @@ static void write_callback(unsigned long error, void *context)
break;
}
}
- bio_endio(bio, bio->bi_size, 0);
+ bio_endio(bio, 0);
}
static void do_write(struct mirror_set *ms, struct bio *bio)
@@ -900,7 +900,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
*/
if (unlikely(ms->log_failure))
while ((bio = bio_list_pop(&sync)))
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
else while ((bio = bio_list_pop(&sync)))
do_write(ms, bio);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 83ddbfe6b8a4..98a633f3d6b0 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -636,7 +636,7 @@ static void error_bios(struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
bio = n;
}
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2bcde5798b5a..fbe477bb2c68 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -999,33 +999,6 @@ void dm_table_unplug_all(struct dm_table *t)
}
}
-int dm_table_flush_all(struct dm_table *t)
-{
- struct list_head *d, *devices = dm_table_get_devices(t);
- int ret = 0;
- unsigned i;
-
- for (i = 0; i < t->num_targets; i++)
- if (t->targets[i].type->flush)
- t->targets[i].type->flush(&t->targets[i]);
-
- for (d = devices->next; d != devices; d = d->next) {
- struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- struct request_queue *q = bdev_get_queue(dd->bdev);
- int err;
-
- if (!q->issue_flush_fn)
- err = -EOPNOTSUPP;
- else
- err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);
-
- if (!ret)
- ret = err;
- }
-
- return ret;
-}
-
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
dm_get(t->md);
@@ -1043,4 +1016,3 @@ EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);
-EXPORT_SYMBOL(dm_table_flush_all);
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index f314d7dc9c26..bdec206c404b 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -43,7 +43,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio,
break;
}
- bio_endio(bio, bio->bi_size, 0);
+ bio_endio(bio, 0);
/* accepted bio, don't make new request */
return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2120155929a6..d837d37f6209 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -484,23 +484,20 @@ static void dec_pending(struct dm_io *io, int error)
blk_add_trace_bio(io->md->queue, io->bio,
BLK_TA_COMPLETE);
- bio_endio(io->bio, io->bio->bi_size, io->error);
+ bio_endio(io->bio, io->error);
}
free_io(io->md, io);
}
}
-static int clone_endio(struct bio *bio, unsigned int done, int error)
+static void clone_endio(struct bio *bio, int error)
{
int r = 0;
struct dm_target_io *tio = bio->bi_private;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
- if (bio->bi_size)
- return 1;
-
if (!bio_flagged(bio, BIO_UPTODATE) && !error)
error = -EIO;
@@ -514,7 +511,7 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
error = r;
else if (r == DM_ENDIO_INCOMPLETE)
/* The target will handle the io */
- return 1;
+ return;
else if (r) {
DMWARN("unimplemented target endio return value: %d", r);
BUG();
@@ -530,7 +527,6 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
bio_put(bio);
free_tio(md, tio);
- return r;
}
static sector_t max_io_len(struct mapped_device *md,
@@ -761,7 +757,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
ci.map = dm_get_table(md);
if (!ci.map) {
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return;
}
@@ -803,7 +799,7 @@ static int dm_request(struct request_queue *q, struct bio *bio)
* guarantee it is (or can be) handled by the targets correctly.
*/
if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@@ -820,13 +816,13 @@ static int dm_request(struct request_queue *q, struct bio *bio)
up_read(&md->io_lock);
if (bio_rw(bio) == READA) {
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
r = queue_io(md, bio);
if (r < 0) {
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
} else if (r == 0)
@@ -844,21 +840,6 @@ static int dm_request(struct request_queue *q, struct bio *bio)
return 0;
}
-static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_table(md);
- int ret = -ENXIO;
-
- if (map) {
- ret = dm_table_flush_all(map);
- dm_table_put(map);
- }
-
- return ret;
-}
-
static void dm_unplug_all(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
@@ -1007,7 +988,6 @@ static struct mapped_device *alloc_dev(int minor)
blk_queue_make_request(md->queue, dm_request);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
md->queue->unplug_fn = dm_unplug_all;
- md->queue->issue_flush_fn = dm_flush_all;
md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
if (!md->io_pool)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 462ee652a890..4b3faa45277e 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -111,7 +111,6 @@ void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
void dm_table_unplug_all(struct dm_table *t);
-int dm_table_flush_all(struct dm_table *t);
/*-----------------------------------------------------------------
* A registry of target types.
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index cb059cf14c2e..cf2ddce34118 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -65,18 +65,16 @@
#include <linux/raid/md.h>
-static int faulty_fail(struct bio *bio, unsigned int bytes_done, int error)
+static void faulty_fail(struct bio *bio, int error)
{
struct bio *b = bio->bi_private;
b->bi_size = bio->bi_size;
b->bi_sector = bio->bi_sector;
- if (bio->bi_size == 0)
- bio_put(bio);
+ bio_put(bio);
- clear_bit(BIO_UPTODATE, &b->bi_flags);
- return (b->bi_end_io)(b, bytes_done, -EIO);
+ bio_io_error(b);
}
typedef struct faulty_conf {
@@ -179,7 +177,7 @@ static int make_request(struct request_queue *q, struct bio *bio)
/* special case - don't decrement, don't generic_make_request,
* just fail immediately
*/
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
return 0;
}
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 17f795c3e0ab..56a11f6c127b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -92,25 +92,6 @@ static void linear_unplug(struct request_queue *q)
}
}
-static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- linear_conf_t *conf = mddev_to_conf(mddev);
- int i, ret = 0;
-
- for (i=0; i < mddev->raid_disks && ret == 0; i++) {
- struct block_device *bdev = conf->disks[i].rdev->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
- }
- return ret;
-}
-
static int linear_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -279,7 +260,6 @@ static int linear_run (mddev_t *mddev)
blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
mddev->queue->unplug_fn = linear_unplug;
- mddev->queue->issue_flush_fn = linear_issue_flush;
mddev->queue->backing_dev_info.congested_fn = linear_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
return 0;
@@ -338,7 +318,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
sector_t block;
if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@@ -358,7 +338,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
bdevname(tmp_dev->rdev->bdev, b),
(unsigned long long)tmp_dev->size,
(unsigned long long)tmp_dev->offset);
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f883b7e37f3d..808cd9549456 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -213,7 +213,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
static int md_fail_request (struct request_queue *q, struct bio *bio)
{
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
@@ -384,12 +384,10 @@ static void free_disk_sb(mdk_rdev_t * rdev)
}
-static int super_written(struct bio *bio, unsigned int bytes_done, int error)
+static void super_written(struct bio *bio, int error)
{
mdk_rdev_t *rdev = bio->bi_private;
mddev_t *mddev = rdev->mddev;
- if (bio->bi_size)
- return 1;
if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk("md: super_written gets error=%d, uptodate=%d\n",
@@ -401,16 +399,13 @@ static int super_written(struct bio *bio, unsigned int bytes_done, int error)
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
bio_put(bio);
- return 0;
}
-static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
+static void super_written_barrier(struct bio *bio, int error)
{
struct bio *bio2 = bio->bi_private;
mdk_rdev_t *rdev = bio2->bi_private;
mddev_t *mddev = rdev->mddev;
- if (bio->bi_size)
- return 1;
if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
error == -EOPNOTSUPP) {
@@ -424,11 +419,11 @@ static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int e
spin_unlock_irqrestore(&mddev->write_lock, flags);
wake_up(&mddev->sb_wait);
bio_put(bio);
- return 0;
+ } else {
+ bio_put(bio2);
+ bio->bi_private = rdev;
+ super_written(bio, error);
}
- bio_put(bio2);
- bio->bi_private = rdev;
- return super_written(bio, bytes_done, error);
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
@@ -489,13 +484,9 @@ void md_super_wait(mddev_t *mddev)
finish_wait(&mddev->sb_wait, &wq);
}
-static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
+static void bi_complete(struct bio *bio, int error)
{
- if (bio->bi_size)
- return 1;
-
complete((struct completion*)bio->bi_private);
- return 0;
}
int sync_page_io(struct block_device *bdev, sector_t sector, int size,
@@ -2723,7 +2714,7 @@ action_show(mddev_t *mddev, char *page)
{
char *type = "idle";
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
+ (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
type = "reshape";
else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
@@ -2842,6 +2833,12 @@ sync_max_store(mddev_t *mddev, const char *buf, size_t len)
static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
+static ssize_t
+degraded_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%d\n", mddev->degraded);
+}
+static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
static ssize_t
sync_speed_show(mddev_t *mddev, char *page)
@@ -2985,6 +2982,7 @@ static struct attribute *md_redundancy_attrs[] = {
&md_suspend_lo.attr,
&md_suspend_hi.attr,
&md_bitmap.attr,
+ &md_degraded.attr,
NULL,
};
static struct attribute_group md_redundancy_group = {
@@ -3085,8 +3083,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
mddev->gendisk = disk;
mutex_unlock(&disks_mutex);
mddev->kobj.parent = &disk->kobj;
- mddev->kobj.k_name = NULL;
- snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
+ kobject_set_name(&mddev->kobj, "%s", "md");
mddev->kobj.ktype = &md_ktype;
if (kobject_register(&mddev->kobj))
printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
@@ -3473,7 +3470,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
mddev->pers->stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->unplug_fn = NULL;
- mddev->queue->issue_flush_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
if (mddev->pers->sync_request)
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
@@ -4721,7 +4717,7 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
void md_unregister_thread(mdk_thread_t *thread)
{
- dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
+ dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
kthread_stop(thread->tsk);
kfree(thread);
@@ -5781,26 +5777,47 @@ static int __init md_init(void)
* Searches all registered partitions for autorun RAID arrays
* at boot time.
*/
-static dev_t detected_devices[128];
-static int dev_cnt;
+
+static LIST_HEAD(all_detected_devices);
+struct detected_devices_node {
+ struct list_head list;
+ dev_t dev;
+};
void md_autodetect_dev(dev_t dev)
{
- if (dev_cnt >= 0 && dev_cnt < 127)
- detected_devices[dev_cnt++] = dev;
+ struct detected_devices_node *node_detected_dev;
+
+ node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
+ if (node_detected_dev) {
+ node_detected_dev->dev = dev;
+ list_add_tail(&node_detected_dev->list, &all_detected_devices);
+ } else {
+ printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
+ ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
+ }
}
static void autostart_arrays(int part)
{
mdk_rdev_t *rdev;
- int i;
+ struct detected_devices_node *node_detected_dev;
+ dev_t dev;
+ int i_scanned, i_passed;
- printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
+ i_scanned = 0;
+ i_passed = 0;
- for (i = 0; i < dev_cnt; i++) {
- dev_t dev = detected_devices[i];
+ printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
+ while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
+ i_scanned++;
+ node_detected_dev = list_entry(all_detected_devices.next,
+ struct detected_devices_node, list);
+ list_del(&node_detected_dev->list);
+ dev = node_detected_dev->dev;
+ kfree(node_detected_dev);
rdev = md_import_device(dev,0, 90);
if (IS_ERR(rdev))
continue;
@@ -5810,8 +5827,11 @@ static void autostart_arrays(int part)
continue;
}
list_add(&rdev->same_set, &pending_raid_disks);
+ i_passed++;
}
- dev_cnt = 0;
+
+ printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
+ i_scanned, i_passed);
autorun_devices(part);
}
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 1e2af43a73b9..b35731cceac6 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -82,21 +82,17 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
struct bio *bio = mp_bh->master_bio;
multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
- bio_endio(bio, bio->bi_size, err);
+ bio_endio(bio, err);
mempool_free(mp_bh, conf->pool);
}
-static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
- int error)
+static void multipath_end_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
- if (bio->bi_size)
- return 1;
-
if (uptodate)
multipath_end_bh_io(mp_bh, 0);
else if (!bio_rw_ahead(bio)) {
@@ -112,7 +108,6 @@ static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
} else
multipath_end_bh_io(mp_bh, error);
rdev_dec_pending(rdev, conf->mddev);
- return 0;
}
static void unplug_slaves(mddev_t *mddev)
@@ -155,7 +150,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
const int rw = bio_data_dir(bio);
if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@@ -169,7 +164,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
mp_bh->path = multipath_map(conf);
if (mp_bh->path < 0) {
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
mempool_free(mp_bh, conf->pool);
return 0;
}
@@ -199,35 +194,6 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
seq_printf (seq, "]");
}
-static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- multipath_conf_t *conf = mddev_to_conf(mddev);
- int i, ret = 0;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct block_device *bdev = rdev->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else {
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
- error_sector);
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- }
- rcu_read_unlock();
- return ret;
-}
static int multipath_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -532,7 +498,6 @@ static int multipath_run (mddev_t *mddev)
mddev->array_size = mddev->size;
mddev->queue->unplug_fn = multipath_unplug;
- mddev->queue->issue_flush_fn = multipath_issue_flush;
mddev->queue->backing_dev_info.congested_fn = multipath_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index b8216bc6db45..c111105fc2dc 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -40,26 +40,6 @@ static void raid0_unplug(struct request_queue *q)
}
}
-static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- raid0_conf_t *conf = mddev_to_conf(mddev);
- mdk_rdev_t **devlist = conf->strip_zone[0].dev;
- int i, ret = 0;
-
- for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- struct block_device *bdev = devlist[i]->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
- }
- return ret;
-}
-
static int raid0_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -250,7 +230,6 @@ static int create_strip_zones (mddev_t *mddev)
mddev->queue->unplug_fn = raid0_unplug;
- mddev->queue->issue_flush_fn = raid0_issue_flush;
mddev->queue->backing_dev_info.congested_fn = raid0_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
@@ -420,7 +399,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
const int rw = bio_data_dir(bio);
if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@@ -490,10 +469,10 @@ bad_map:
" or bigger than %dk %llu %d\n", chunk_size,
(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
-
+
static void raid0_status (struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
@@ -501,18 +480,18 @@ static void raid0_status (struct seq_file *seq, mddev_t *mddev)
int j, k, h;
char b[BDEVNAME_SIZE];
raid0_conf_t *conf = mddev_to_conf(mddev);
-
+
h = 0;
for (j = 0; j < conf->nr_strip_zones; j++) {
seq_printf(seq, " z%d", j);
if (conf->hash_table[h] == conf->strip_zone+j)
- seq_printf("(h%d)", h++);
+ seq_printf(seq, "(h%d)", h++);
seq_printf(seq, "=[");
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- seq_printf (seq, "%s/", bdevname(
+ seq_printf(seq, "%s/", bdevname(
conf->strip_zone[j].dev[k]->bdev,b));
- seq_printf (seq, "] zo=%d do=%d s=%d\n",
+ seq_printf(seq, "] zo=%d do=%d s=%d\n",
conf->strip_zone[j].zone_offset,
conf->strip_zone[j].dev_offset,
conf->strip_zone[j].size);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f33a729960ca..16775a0df7f6 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -238,7 +238,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
(unsigned long long) bio->bi_sector +
(bio->bi_size >> 9) - 1);
- bio_endio(bio, bio->bi_size,
+ bio_endio(bio,
test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
}
free_r1bio(r1_bio);
@@ -255,16 +255,13 @@ static inline void update_head_pos(int disk, r1bio_t *r1_bio)
r1_bio->sector + (r1_bio->sectors);
}
-static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
+static void raid1_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
int mirror;
conf_t *conf = mddev_to_conf(r1_bio->mddev);
- if (bio->bi_size)
- return 1;
-
mirror = r1_bio->read_disk;
/*
* this branch is our 'one mirror IO has finished' event handler:
@@ -301,10 +298,9 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
}
rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
- return 0;
}
-static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
+static void raid1_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@@ -312,8 +308,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
conf_t *conf = mddev_to_conf(r1_bio->mddev);
struct bio *to_put = NULL;
- if (bio->bi_size)
- return 1;
for (mirror = 0; mirror < conf->raid_disks; mirror++)
if (r1_bio->bios[mirror] == bio)
@@ -366,7 +360,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
(unsigned long long) mbio->bi_sector,
(unsigned long long) mbio->bi_sector +
(mbio->bi_size >> 9) - 1);
- bio_endio(mbio, mbio->bi_size, 0);
+ bio_endio(mbio, 0);
}
}
}
@@ -400,8 +394,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
if (to_put)
bio_put(to_put);
-
- return 0;
}
@@ -575,36 +567,6 @@ static void raid1_unplug(struct request_queue *q)
md_wakeup_thread(mddev->thread);
}
-static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- conf_t *conf = mddev_to_conf(mddev);
- int i, ret = 0;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct block_device *bdev = rdev->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else {
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
- error_sector);
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
static int raid1_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -796,7 +758,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
if (rw == WRITE)
md_write_end(mddev);
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@@ -1137,14 +1099,11 @@ abort:
}
-static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
+static void end_sync_read(struct bio *bio, int error)
{
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
int i;
- if (bio->bi_size)
- return 1;
-
for (i=r1_bio->mddev->raid_disks; i--; )
if (r1_bio->bios[i] == bio)
break;
@@ -1160,10 +1119,9 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
if (atomic_dec_and_test(&r1_bio->remaining))
reschedule_retry(r1_bio);
- return 0;
}
-static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
+static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@@ -1172,9 +1130,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
int i;
int mirror=0;
- if (bio->bi_size)
- return 1;
-
for (i = 0; i < conf->raid_disks; i++)
if (r1_bio->bios[i] == bio) {
mirror = i;
@@ -1200,7 +1155,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
md_done_sync(mddev, r1_bio->sectors, uptodate);
put_buf(r1_bio);
}
- return 0;
}
static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
@@ -1260,7 +1214,8 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
j = 0;
if (j >= 0)
mddev->resync_mismatches += r1_bio->sectors;
- if (j < 0 || test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
+ if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
+ && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
sbio->bi_end_io = NULL;
rdev_dec_pending(conf->mirrors[i].rdev, mddev);
} else {
@@ -2013,7 +1968,6 @@ static int run(mddev_t *mddev)
mddev->array_size = mddev->size;
mddev->queue->unplug_fn = raid1_unplug;
- mddev->queue->issue_flush_fn = raid1_issue_flush;
mddev->queue->backing_dev_info.congested_fn = raid1_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 4e53792aa520..fc6607acb6e4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -227,7 +227,7 @@ static void raid_end_bio_io(r10bio_t *r10_bio)
{
struct bio *bio = r10_bio->master_bio;
- bio_endio(bio, bio->bi_size,
+ bio_endio(bio,
test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
free_r10bio(r10_bio);
}
@@ -243,15 +243,13 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
r10_bio->devs[slot].addr + (r10_bio->sectors);
}
-static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
+static void raid10_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
int slot, dev;
conf_t *conf = mddev_to_conf(r10_bio->mddev);
- if (bio->bi_size)
- return 1;
slot = r10_bio->read_slot;
dev = r10_bio->devs[slot].devnum;
@@ -284,19 +282,15 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int
}
rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
- return 0;
}
-static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
+static void raid10_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
int slot, dev;
conf_t *conf = mddev_to_conf(r10_bio->mddev);
- if (bio->bi_size)
- return 1;
-
for (slot = 0; slot < conf->copies; slot++)
if (r10_bio->devs[slot].bio == bio)
break;
@@ -339,7 +333,6 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in
}
rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
- return 0;
}
@@ -618,36 +611,6 @@ static void raid10_unplug(struct request_queue *q)
md_wakeup_thread(mddev->thread);
}
-static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- conf_t *conf = mddev_to_conf(mddev);
- int i, ret = 0;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct block_device *bdev = rdev->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else {
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
- error_sector);
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
static int raid10_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -787,7 +750,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
unsigned long flags;
if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@@ -819,7 +782,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
" or bigger than %dk %llu %d\n", chunk_sects/2,
(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
@@ -1155,15 +1118,12 @@ abort:
}
-static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
+static void end_sync_read(struct bio *bio, int error)
{
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
conf_t *conf = mddev_to_conf(r10_bio->mddev);
int i,d;
- if (bio->bi_size)
- return 1;
-
for (i=0; i<conf->copies; i++)
if (r10_bio->devs[i].bio == bio)
break;
@@ -1192,10 +1152,9 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
reschedule_retry(r10_bio);
}
rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
- return 0;
}
-static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
+static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
@@ -1203,9 +1162,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
conf_t *conf = mddev_to_conf(mddev);
int i,d;
- if (bio->bi_size)
- return 1;
-
for (i = 0; i < conf->copies; i++)
if (r10_bio->devs[i].bio == bio)
break;
@@ -1228,7 +1184,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
}
}
rdev_dec_pending(conf->mirrors[d].rdev, mddev);
- return 0;
}
/*
@@ -1374,7 +1329,7 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
if (test_bit(R10BIO_Uptodate, &r10_bio->state))
generic_make_request(wbio);
else
- bio_endio(wbio, wbio->bi_size, -EIO);
+ bio_endio(wbio, -EIO);
}
@@ -2133,7 +2088,6 @@ static int run(mddev_t *mddev)
mddev->resync_max_sectors = size << conf->chunk_shift;
mddev->queue->unplug_fn = raid10_unplug;
- mddev->queue->issue_flush_fn = raid10_issue_flush;
mddev->queue->backing_dev_info.congested_fn = raid10_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f96dea975fa5..8ee181a01f52 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -108,12 +108,11 @@ static void return_io(struct bio *return_bi)
{
struct bio *bi = return_bi;
while (bi) {
- int bytes = bi->bi_size;
return_bi = bi->bi_next;
bi->bi_next = NULL;
bi->bi_size = 0;
- bi->bi_end_io(bi, bytes,
+ bi->bi_end_io(bi,
test_bit(BIO_UPTODATE, &bi->bi_flags)
? 0 : -EIO);
bi = return_bi;
@@ -382,10 +381,10 @@ static unsigned long get_stripe_work(struct stripe_head *sh)
return pending;
}
-static int
-raid5_end_read_request(struct bio *bi, unsigned int bytes_done, int error);
-static int
-raid5_end_write_request (struct bio *bi, unsigned int bytes_done, int error);
+static void
+raid5_end_read_request(struct bio *bi, int error);
+static void
+raid5_end_write_request(struct bio *bi, int error);
static void ops_run_io(struct stripe_head *sh)
{
@@ -1110,8 +1109,7 @@ static void shrink_stripes(raid5_conf_t *conf)
conf->slab_cache = NULL;
}
-static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
- int error)
+static void raid5_end_read_request(struct bio * bi, int error)
{
struct stripe_head *sh = bi->bi_private;
raid5_conf_t *conf = sh->raid_conf;
@@ -1120,8 +1118,6 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
char b[BDEVNAME_SIZE];
mdk_rdev_t *rdev;
- if (bi->bi_size)
- return 1;
for (i=0 ; i<disks; i++)
if (bi == &sh->dev[i].req)
@@ -1132,7 +1128,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
uptodate);
if (i == disks) {
BUG();
- return 0;
+ return;
}
if (uptodate) {
@@ -1185,20 +1181,15 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
- return 0;
}
-static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
- int error)
+static void raid5_end_write_request (struct bio *bi, int error)
{
struct stripe_head *sh = bi->bi_private;
raid5_conf_t *conf = sh->raid_conf;
int disks = sh->disks, i;
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
- if (bi->bi_size)
- return 1;
-
for (i=0 ; i<disks; i++)
if (bi == &sh->dev[i].req)
break;
@@ -1208,7 +1199,7 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
uptodate);
if (i == disks) {
BUG();
- return 0;
+ return;
}
if (!uptodate)
@@ -1219,7 +1210,6 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
- return 0;
}
@@ -3214,36 +3204,6 @@ static void raid5_unplug_device(struct request_queue *q)
unplug_slaves(mddev);
}
-static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- raid5_conf_t *conf = mddev_to_conf(mddev);
- int i, ret = 0;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct block_device *bdev = rdev->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else {
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
- error_sector);
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
static int raid5_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -3340,7 +3300,7 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
* first).
* If the read failed..
*/
-static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
+static void raid5_align_endio(struct bio *bi, int error)
{
struct bio* raid_bi = bi->bi_private;
mddev_t *mddev;
@@ -3348,8 +3308,6 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
mdk_rdev_t *rdev;
- if (bi->bi_size)
- return 1;
bio_put(bi);
mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
@@ -3360,17 +3318,16 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
rdev_dec_pending(rdev, conf->mddev);
if (!error && uptodate) {
- bio_endio(raid_bi, bytes, 0);
+ bio_endio(raid_bi, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
- return 0;
+ return;
}
pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
add_bio_to_retry(raid_bi, conf);
- return 0;
}
static int bio_fits_rdev(struct bio *bi)
@@ -3476,7 +3433,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
int remaining;
if (unlikely(bio_barrier(bi))) {
- bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
+ bio_endio(bi, -EOPNOTSUPP);
return 0;
}
@@ -3592,12 +3549,11 @@ static int make_request(struct request_queue *q, struct bio * bi)
remaining = --bi->bi_phys_segments;
spin_unlock_irq(&conf->device_lock);
if (remaining == 0) {
- int bytes = bi->bi_size;
if ( rw == WRITE )
md_write_end(mddev);
- bi->bi_size = 0;
- bi->bi_end_io(bi, bytes,
+
+ bi->bi_end_io(bi,
test_bit(BIO_UPTODATE, &bi->bi_flags)
? 0 : -EIO);
}
@@ -3875,10 +3831,8 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
remaining = --raid_bio->bi_phys_segments;
spin_unlock_irq(&conf->device_lock);
if (remaining == 0) {
- int bytes = raid_bio->bi_size;
- raid_bio->bi_size = 0;
- raid_bio->bi_end_io(raid_bio, bytes,
+ raid_bio->bi_end_io(raid_bio,
test_bit(BIO_UPTODATE, &raid_bio->bi_flags)
? 0 : -EIO);
}
@@ -4279,7 +4233,6 @@ static int run(mddev_t *mddev)
mdname(mddev));
mddev->queue->unplug_fn = raid5_unplug_device;
- mddev->queue->issue_flush_fn = raid5_issue_flush;
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;