Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/Kconfig               |  14
-rw-r--r--  drivers/md/Makefile              |   2
-rw-r--r--  drivers/md/bitmap.c              |  95
-rw-r--r--  drivers/md/dm-crypt.c            | 177
-rw-r--r--  drivers/md/dm-delay.c            |   3
-rw-r--r--  drivers/md/dm-exception-store.c  | 129
-rw-r--r--  drivers/md/dm-io.c               |   2
-rw-r--r--  drivers/md/dm-ioctl.c            |  14
-rw-r--r--  drivers/md/dm-kcopyd.c           |  14
-rw-r--r--  drivers/md/dm-linear.c           |  49
-rw-r--r--  drivers/md/dm-log.c              |   6
-rw-r--r--  drivers/md/dm-mpath.c            | 108
-rw-r--r--  drivers/md/dm-mpath.h            |   2
-rw-r--r--  drivers/md/dm-path-selector.c    |   3
-rw-r--r--  drivers/md/dm-raid1.c            | 795
-rw-r--r--  drivers/md/dm-region-hash.c      | 704
-rw-r--r--  drivers/md/dm-round-robin.c      |   3
-rw-r--r--  drivers/md/dm-snap.c             | 174
-rw-r--r--  drivers/md/dm-snap.h             |  14
-rw-r--r--  drivers/md/dm-stripe.c           |  10
-rw-r--r--  drivers/md/dm-table.c            | 160
-rw-r--r--  drivers/md/dm-zero.c             |   2
-rw-r--r--  drivers/md/dm.c                  | 166
-rw-r--r--  drivers/md/dm.h                  |  25
-rw-r--r--  drivers/md/faulty.c              |   4
-rw-r--r--  drivers/md/linear.c              | 155
-rw-r--r--  drivers/md/md.c                  | 760
-rw-r--r--  drivers/md/multipath.c           |  38
-rw-r--r--  drivers/md/raid0.c               |  23
-rw-r--r--  drivers/md/raid1.c               |  44
-rw-r--r--  drivers/md/raid10.c              |  54
-rw-r--r--  drivers/md/raid5.c               | 913
-rw-r--r--  drivers/md/raid6.h               |   9
33 files changed, 2602 insertions, 2069 deletions
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 07d92c11b5d8..2281b5098e95 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -30,6 +30,20 @@ config BLK_DEV_MD
If unsure, say N.
+config MD_AUTODETECT
+ bool "Autodetect RAID arrays during kernel boot"
+ depends on BLK_DEV_MD=y
+ default y
+ ---help---
+ If you say Y here, then the kernel will try to autodetect raid
+ arrays as part of its boot process.
+
+ If you don't use raid but say Y, this autodetection can add
+ a several-second delay to the boot time due to the various
+ synchronisation steps it involves.
+
+ If unsure, say Y.
+
config MD_LINEAR
tristate "Linear (append) mode"
depends on BLK_DEV_MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index f1ef33dfd8cf..1c615804ea76 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
-obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o
+obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o
quiet_cmd_unroll = UNROLL $@
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index b26927ce889c..ac89a5deaca2 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -225,7 +225,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde
|| test_bit(Faulty, &rdev->flags))
continue;
- target = (rdev->sb_offset << 1) + offset + index * (PAGE_SIZE/512);
+ target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
if (sync_page_io(rdev->bdev, target, PAGE_SIZE, page, READ)) {
page->index = index;
@@ -238,15 +238,47 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde
}
+static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
+{
+ /* Iterate the disks of an mddev, using rcu to protect access to the
+ * linked list, and raising the refcount of devices we return to ensure
+ * they don't disappear while in use.
+ * As devices are only added or removed when raid_disk is < 0 and
+ * nr_pending is 0 and In_sync is clear, the entries we return will
+ * still be in the same position on the list when we re-enter
+ * list_for_each_continue_rcu.
+ */
+ struct list_head *pos;
+ rcu_read_lock();
+ if (rdev == NULL)
+ /* start at the beginning */
+ pos = &mddev->disks;
+ else {
+ /* release the previous rdev and start from there. */
+ rdev_dec_pending(rdev, mddev);
+ pos = &rdev->same_set;
+ }
+ list_for_each_continue_rcu(pos, &mddev->disks) {
+ rdev = list_entry(pos, mdk_rdev_t, same_set);
+ if (rdev->raid_disk >= 0 &&
+ test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags)) {
+ /* this is a usable device */
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ return rdev;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
- mdk_rdev_t *rdev;
- struct list_head *tmp;
+ mdk_rdev_t *rdev = NULL;
mddev_t *mddev = bitmap->mddev;
- rdev_for_each(rdev, tmp, mddev)
- if (test_bit(In_sync, &rdev->flags)
- && !test_bit(Faulty, &rdev->flags)) {
+ while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
int size = PAGE_SIZE;
if (page->index == bitmap->file_pages-1)
size = roundup(bitmap->last_page_size,
@@ -260,32 +292,36 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
+ (long)(page->index * (PAGE_SIZE/512))
+ size/512 > 0)
/* bitmap runs in to metadata */
- return -EINVAL;
+ goto bad_alignment;
if (rdev->data_offset + mddev->size*2
- > rdev->sb_offset*2 + bitmap->offset)
+ > rdev->sb_start + bitmap->offset)
/* data runs in to bitmap */
- return -EINVAL;
- } else if (rdev->sb_offset*2 < rdev->data_offset) {
+ goto bad_alignment;
+ } else if (rdev->sb_start < rdev->data_offset) {
/* METADATA BITMAP DATA */
- if (rdev->sb_offset*2
+ if (rdev->sb_start
+ bitmap->offset
+ page->index*(PAGE_SIZE/512) + size/512
> rdev->data_offset)
/* bitmap runs in to data */
- return -EINVAL;
+ goto bad_alignment;
} else {
/* DATA METADATA BITMAP - no problems */
}
md_super_write(mddev, rdev,
- (rdev->sb_offset<<1) + bitmap->offset
+ rdev->sb_start + bitmap->offset
+ page->index * (PAGE_SIZE/512),
size,
page);
- }
+ }
if (wait)
md_super_wait(mddev);
return 0;
+
+ bad_alignment:
+ rcu_read_unlock();
+ return -EINVAL;
}
static void bitmap_file_kick(struct bitmap *bitmap);
@@ -454,8 +490,11 @@ void bitmap_update_sb(struct bitmap *bitmap)
spin_unlock_irqrestore(&bitmap->lock, flags);
sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
sb->events = cpu_to_le64(bitmap->mddev->events);
- if (!bitmap->mddev->degraded)
- sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+ if (bitmap->mddev->events < bitmap->events_cleared) {
+ /* rocking back to read-only */
+ bitmap->events_cleared = bitmap->mddev->events;
+ sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
+ }
kunmap_atomic(sb, KM_USER0);
write_page(bitmap, bitmap->sb_page, 1);
}
@@ -1085,9 +1124,19 @@ void bitmap_daemon_work(struct bitmap *bitmap)
} else
spin_unlock_irqrestore(&bitmap->lock, flags);
lastpage = page;
-/*
- printk("bitmap clean at page %lu\n", j);
-*/
+
+ /* We are possibly going to clear some bits, so make
+ * sure that events_cleared is up-to-date.
+ */
+ if (bitmap->need_sync) {
+ bitmap_super_t *sb;
+ bitmap->need_sync = 0;
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb->events_cleared =
+ cpu_to_le64(bitmap->events_cleared);
+ kunmap_atomic(sb, KM_USER0);
+ write_page(bitmap, bitmap->sb_page, 1);
+ }
spin_lock_irqsave(&bitmap->lock, flags);
clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
}
@@ -1216,7 +1265,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
case 0:
bitmap_file_set_bit(bitmap, offset);
bitmap_count_page(bitmap,offset, 1);
- blk_plug_device(bitmap->mddev->queue);
+ blk_plug_device_unlocked(bitmap->mddev->queue);
/* fall through */
case 1:
*bmc = 2;
@@ -1257,6 +1306,12 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
return;
}
+ if (success &&
+ bitmap->events_cleared < bitmap->mddev->events) {
+ bitmap->events_cleared = bitmap->mddev->events;
+ bitmap->need_sync = 1;
+ }
+
if (!success && ! (*bmc & NEEDED_MASK))
*bmc |= NEEDED_MASK;
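
The write_sb_page() hunk above replaces the plain rdev_for_each() walk with the new
next_active_rdev() get/put iterator. A minimal kernel-style sketch of the calling
convention it establishes (illustrative only, not part of the patch; both
write_to_active_rdevs() and do_write_chunk() are hypothetical stand-ins):

static void write_to_active_rdevs(mddev_t *mddev)
{
        mdk_rdev_t *rdev = NULL;

        /*
         * Each call drops the nr_pending reference on the rdev passed in
         * and returns the next In_sync, non-Faulty device with nr_pending
         * raised, so the loop body can issue I/O without the device
         * disappearing underneath it.
         */
        while ((rdev = next_active_rdev(rdev, mddev)) != NULL)
                do_write_chunk(rdev);   /* hypothetical per-device work */

        /* when NULL is returned, the iterator has already dropped the last reference */
}
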
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ab6a61db63ce..ce26c84af064 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -23,7 +23,7 @@
#include <asm/page.h>
#include <asm/unaligned.h>
-#include "dm.h"
+#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)
@@ -56,6 +56,7 @@ struct dm_crypt_io {
atomic_t pending;
int error;
sector_t sector;
+ struct dm_crypt_io *base_io;
};
struct dm_crypt_request {
@@ -93,7 +94,6 @@ struct crypt_config {
struct workqueue_struct *io_queue;
struct workqueue_struct *crypt_queue;
- wait_queue_head_t writeq;
/*
* crypto related data
@@ -333,7 +333,6 @@ static void crypt_convert_init(struct crypt_config *cc,
ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
ctx->sector = sector + cc->iv_offset;
init_completion(&ctx->restart);
- atomic_set(&ctx->pending, 1);
}
static int crypt_convert_block(struct crypt_config *cc,
@@ -408,6 +407,8 @@ static int crypt_convert(struct crypt_config *cc,
{
int r;
+ atomic_set(&ctx->pending, 1);
+
while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
ctx->idx_out < ctx->bio_out->bi_vcnt) {
@@ -456,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
/*
* Generate a new unfragmented bio with the given size
* This should never violate the device limitations
- * May return a smaller bio when running out of pages
+ * May return a smaller bio when running out of pages, indicated by
+ * *out_of_pages set to 1.
*/
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
+ unsigned *out_of_pages)
{
struct crypt_config *cc = io->target->private;
struct bio *clone;
@@ -472,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
return NULL;
clone_init(io, clone);
+ *out_of_pages = 0;
for (i = 0; i < nr_iovecs; i++) {
page = mempool_alloc(cc->page_pool, gfp_mask);
- if (!page)
+ if (!page) {
+ *out_of_pages = 1;
break;
+ }
/*
* if additional pages cannot be allocated without waiting,
@@ -517,9 +523,32 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
}
}
+static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
+ struct bio *bio, sector_t sector)
+{
+ struct crypt_config *cc = ti->private;
+ struct dm_crypt_io *io;
+
+ io = mempool_alloc(cc->io_pool, GFP_NOIO);
+ io->target = ti;
+ io->base_bio = bio;
+ io->sector = sector;
+ io->error = 0;
+ io->base_io = NULL;
+ atomic_set(&io->pending, 0);
+
+ return io;
+}
+
+static void crypt_inc_pending(struct dm_crypt_io *io)
+{
+ atomic_inc(&io->pending);
+}
+
/*
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
+ * If base_io is set, wait for the last fragment to complete.
*/
static void crypt_dec_pending(struct dm_crypt_io *io)
{
@@ -528,7 +557,14 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
if (!atomic_dec_and_test(&io->pending))
return;
- bio_endio(io->base_bio, io->error);
+ if (likely(!io->base_io))
+ bio_endio(io->base_bio, io->error);
+ else {
+ if (io->error && !io->base_io->error)
+ io->base_io->error = io->error;
+ crypt_dec_pending(io->base_io);
+ }
+
mempool_free(io, cc->io_pool);
}
@@ -591,7 +627,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
struct bio *base_bio = io->base_bio;
struct bio *clone;
- atomic_inc(&io->pending);
+ crypt_inc_pending(io);
/*
* The block layer might modify the bvec array, so always
@@ -619,10 +655,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
static void kcryptd_io_write(struct dm_crypt_io *io)
{
struct bio *clone = io->ctx.bio_out;
- struct crypt_config *cc = io->target->private;
-
generic_make_request(clone);
- wake_up(&cc->writeq);
}
static void kcryptd_io(struct work_struct *work)
@@ -653,6 +686,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
crypt_free_buffer_pages(cc, clone);
bio_put(clone);
io->error = -EIO;
+ crypt_dec_pending(io);
return;
}
@@ -660,70 +694,100 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
clone->bi_sector = cc->start + io->sector;
- io->sector += bio_sectors(clone);
if (async)
kcryptd_queue_io(io);
- else {
- atomic_inc(&io->pending);
+ else
generic_make_request(clone);
- }
}
-static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
+static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->target->private;
struct bio *clone;
+ struct dm_crypt_io *new_io;
+ int crypt_finished;
+ unsigned out_of_pages = 0;
unsigned remaining = io->base_bio->bi_size;
+ sector_t sector = io->sector;
int r;
/*
+ * Prevent io from disappearing until this function completes.
+ */
+ crypt_inc_pending(io);
+ crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
+
+ /*
* The allocated buffers can be smaller than the whole bio,
* so repeat the whole process until all the data can be handled.
*/
while (remaining) {
- clone = crypt_alloc_buffer(io, remaining);
+ clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
if (unlikely(!clone)) {
io->error = -ENOMEM;
- return;
+ break;
}
io->ctx.bio_out = clone;
io->ctx.idx_out = 0;
remaining -= clone->bi_size;
+ sector += bio_sectors(clone);
+ crypt_inc_pending(io);
r = crypt_convert(cc, &io->ctx);
+ crypt_finished = atomic_dec_and_test(&io->ctx.pending);
- if (atomic_dec_and_test(&io->ctx.pending)) {
- /* processed, no running async crypto */
+ /* Encryption was already finished, submit io now */
+ if (crypt_finished) {
kcryptd_crypt_write_io_submit(io, r, 0);
+
+ /*
+ * If there was an error, do not try next fragments.
+ * For async, error is processed in async handler.
+ */
if (unlikely(r < 0))
- return;
- } else
- atomic_inc(&io->pending);
-
- /* out of memory -> run queues */
- if (unlikely(remaining)) {
- /* wait for async crypto then reinitialize pending */
- wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
- atomic_set(&io->ctx.pending, 1);
- congestion_wait(WRITE, HZ/100);
- }
- }
-}
+ break;
-static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
-{
- struct crypt_config *cc = io->target->private;
+ io->sector = sector;
+ }
- /*
- * Prevent io from disappearing until this function completes.
- */
- atomic_inc(&io->pending);
+ /*
+ * Out of memory -> run queues
+ * But don't wait if split was due to the io size restriction
+ */
+ if (unlikely(out_of_pages))
+ congestion_wait(WRITE, HZ/100);
- crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
- kcryptd_crypt_write_convert_loop(io);
+ /*
+ * With async crypto it is unsafe to share the crypto context
+ * between fragments, so switch to a new dm_crypt_io structure.
+ */
+ if (unlikely(!crypt_finished && remaining)) {
+ new_io = crypt_io_alloc(io->target, io->base_bio,
+ sector);
+ crypt_inc_pending(new_io);
+ crypt_convert_init(cc, &new_io->ctx, NULL,
+ io->base_bio, sector);
+ new_io->ctx.idx_in = io->ctx.idx_in;
+ new_io->ctx.offset_in = io->ctx.offset_in;
+
+ /*
+ * Fragments after the first use the base_io
+ * pending count.
+ */
+ if (!io->base_io)
+ new_io->base_io = io;
+ else {
+ new_io->base_io = io->base_io;
+ crypt_inc_pending(io->base_io);
+ crypt_dec_pending(io);
+ }
+
+ io = new_io;
+ }
+ }
crypt_dec_pending(io);
}
@@ -741,7 +805,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
struct crypt_config *cc = io->target->private;
int r = 0;
- atomic_inc(&io->pending);
+ crypt_inc_pending(io);
crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
io->sector);
@@ -1049,7 +1113,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_crypt_queue;
}
- init_waitqueue_head(&cc->writeq);
ti->private = cc;
return 0;
@@ -1108,15 +1171,9 @@ static void crypt_dtr(struct dm_target *ti)
static int crypt_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
- struct crypt_config *cc = ti->private;
struct dm_crypt_io *io;
- io = mempool_alloc(cc->io_pool, GFP_NOIO);
- io->target = ti;
- io->base_bio = bio;
- io->sector = bio->bi_sector - ti->begin;
- io->error = 0;
- atomic_set(&io->pending, 0);
+ io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
if (bio_data_dir(io->base_bio) == READ)
kcryptd_queue_io(io);
@@ -1216,9 +1273,24 @@ error:
return -EINVAL;
}
+static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct crypt_config *cc = ti->private;
+ struct request_queue *q = bdev_get_queue(cc->dev->bdev);
+
+ if (!q->merge_bvec_fn)
+ return max_size;
+
+ bvm->bi_bdev = cc->dev->bdev;
+ bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin;
+
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
static struct target_type crypt_target = {
.name = "crypt",
- .version= {1, 5, 0},
+ .version= {1, 6, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
@@ -1228,6 +1300,7 @@ static struct target_type crypt_target = {
.preresume = crypt_preresume,
.resume = crypt_resume,
.message = crypt_message,
+ .merge = crypt_merge,
};
static int __init dm_crypt_init(void)
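
With the rewritten kcryptd_crypt_write_convert() above, a large write can be split into
several fragments when crypt_alloc_buffer() runs out of pages, and with async crypto each
later fragment gets its own dm_crypt_io whose completion is funnelled through base_io.
Below is a stripped-down user-space model of that pending-count chaining (illustrative
only; struct io and the explicit reference taken in main() are simplified stand-ins for
the crypt_inc_pending()/crypt_dec_pending() calls in the patch):

#include <stdatomic.h>
#include <stdio.h>

struct io {
        atomic_int pending;
        struct io *base_io;     /* io of the first fragment, or NULL for the base itself */
        int error;
};

static void dec_pending(struct io *io)
{
        if (atomic_fetch_sub(&io->pending, 1) != 1)
                return;
        if (!io->base_io) {
                /* last reference on the base io: the original bio completes here */
                printf("base bio completed, error=%d\n", io->error);
        } else {
                /* a fragment finished: propagate its error and release the base io */
                if (io->error && !io->base_io->error)
                        io->base_io->error = io->error;
                dec_pending(io->base_io);
        }
}

int main(void)
{
        struct io base = { .pending = 1, .base_io = NULL, .error = 0 };
        struct io frag = { .pending = 1, .base_io = &base, .error = 0 };

        atomic_fetch_add(&base.pending, 1);     /* the fragment holds a reference on the base */
        dec_pending(&frag);                     /* the fragment's last reference drops ... */
        dec_pending(&base);                     /* ... then the submitter drops its own reference */
        return 0;
}

With more fragments the same pattern repeats: every fragment beyond the first holds a
reference on the base io's pending count, so the original bio only completes after the
submitter and every fragment have dropped theirs.
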
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index bdd37f881c42..848b381f1173 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -13,7 +13,8 @@
#include <linux/bio.h>
#include <linux/slab.h>
-#include "dm.h"
+#include <linux/device-mapper.h>
+
#include "dm-bio-list.h"
#define DM_MSG_PREFIX "delay"
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 41f408068a7c..01590f3e0009 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -7,7 +7,6 @@
* This file is released under the GPL.
*/
-#include "dm.h"
#include "dm-snap.h"
#include <linux/mm.h>
@@ -105,15 +104,20 @@ struct pstore {
void *area;
/*
+ * An area of zeros used to clear the next area.
+ */
+ void *zero_area;
+
+ /*
* Used to keep track of which metadata area the data in
* 'chunk' refers to.
*/
- uint32_t current_area;
+ chunk_t current_area;
/*
* The next free chunk for an exception.
*/
- uint32_t next_free;
+ chunk_t next_free;
/*
* The index of next free exception in the current
@@ -149,6 +153,13 @@ static int alloc_area(struct pstore *ps)
if (!ps->area)
return r;
+ ps->zero_area = vmalloc(len);
+ if (!ps->zero_area) {
+ vfree(ps->area);
+ return r;
+ }
+ memset(ps->zero_area, 0, len);
+
return 0;
}
@@ -156,6 +167,8 @@ static void free_area(struct pstore *ps)
{
vfree(ps->area);
ps->area = NULL;
+ vfree(ps->zero_area);
+ ps->zero_area = NULL;
}
struct mdata_req {
@@ -175,7 +188,7 @@ static void do_metadata(struct work_struct *work)
/*
* Read or write a chunk aligned and sized block of data from a device.
*/
-static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
+static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
{
struct dm_io_region where = {
.bdev = ps->snap->cow->bdev,
@@ -209,29 +222,52 @@ static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
}
/*
+ * Convert a metadata area index to a chunk index.
+ */
+static chunk_t area_location(struct pstore *ps, chunk_t area)
+{
+ return 1 + ((ps->exceptions_per_area + 1) * area);
+}
+
+/*
* Read or write a metadata area. Remembering to skip the first
* chunk which holds the header.
*/
-static int area_io(struct pstore *ps, uint32_t area, int rw)
+static int area_io(struct pstore *ps, int rw)
{
int r;
- uint32_t chunk;
+ chunk_t chunk;
- /* convert a metadata area index to a chunk index */
- chunk = 1 + ((ps->exceptions_per_area + 1) * area);
+ chunk = area_location(ps, ps->current_area);
r = chunk_io(ps, chunk, rw, 0);
if (r)
return r;
- ps->current_area = area;
return 0;
}
-static int zero_area(struct pstore *ps, uint32_t area)
+static void zero_memory_area(struct pstore *ps)
{
memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
- return area_io(ps, area, WRITE);
+}
+
+static int zero_disk_area(struct pstore *ps, chunk_t area)
+{
+ struct dm_io_region where = {
+ .bdev = ps->snap->cow->bdev,
+ .sector = ps->snap->chunk_size * area_location(ps, area),
+ .count = ps->snap->chunk_size,
+ };
+ struct dm_io_request io_req = {
+ .bi_rw = WRITE,
+ .mem.type = DM_IO_VMA,
+ .mem.ptr.vma = ps->zero_area,
+ .client = ps->io_client,
+ .notify.fn = NULL,
+ };
+
+ return dm_io(&io_req, 1, &where, NULL);
}
static int read_header(struct pstore *ps, int *new_snapshot)
@@ -404,15 +440,14 @@ static int insert_exceptions(struct pstore *ps, int *full)
static int read_exceptions(struct pstore *ps)
{
- uint32_t area;
int r, full = 1;
/*
* Keeping reading chunks and inserting exceptions until
* we find a partially full area.
*/
- for (area = 0; full; area++) {
- r = area_io(ps, area, READ);
+ for (ps->current_area = 0; full; ps->current_area++) {
+ r = area_io(ps, READ);
if (r)
return r;
@@ -421,6 +456,8 @@ static int read_exceptions(struct pstore *ps)
return r;
}
+ ps->current_area--;
+
return 0;
}
@@ -479,12 +516,13 @@ static int persistent_read_metadata(struct exception_store *store)
return r;
}
- r = zero_area(ps, 0);
+ ps->current_area = 0;
+ zero_memory_area(ps);
+ r = zero_disk_area(ps, 0);
if (r) {
- DMWARN("zero_area(0) failed");
+ DMWARN("zero_disk_area(0) failed");
return r;
}
-
} else {
/*
* Sanity checks.
@@ -517,6 +555,7 @@ static int persistent_prepare(struct exception_store *store,
{
struct pstore *ps = get_info(store);
uint32_t stride;
+ chunk_t next_free;
sector_t size = get_dev_size(store->snap->cow->bdev);
/* Is there enough room ? */
@@ -530,7 +569,8 @@ static int persistent_prepare(struct exception_store *store,
* into account the location of the metadata chunks.
*/
stride = (ps->exceptions_per_area + 1);
- if ((++ps->next_free % stride) == 1)
+ next_free = ++ps->next_free;
+ if (sector_div(next_free, stride) == 1)
ps->next_free++;
atomic_inc(&ps->pending_count);
@@ -542,7 +582,6 @@ static void persistent_commit(struct exception_store *store,
void (*callback) (void *, int success),
void *callback_context)
{
- int r;
unsigned int i;
struct pstore *ps = get_info(store);
struct disk_exception de;
@@ -563,33 +602,41 @@ static void persistent_commit(struct exception_store *store,
cb->context = callback_context;
/*
- * If there are no more exceptions in flight, or we have
- * filled this metadata area we commit the exceptions to
- * disk.
+ * If there are exceptions in flight and we have not yet
+ * filled this metadata area there's nothing more to do.
*/
- if (atomic_dec_and_test(&ps->pending_count) ||
- (ps->current_committed == ps->exceptions_per_area)) {
- r = area_io(ps, ps->current_area, WRITE);
- if (r)
- ps->valid = 0;
+ if (!atomic_dec_and_test(&ps->pending_count) &&
+ (ps->current_committed != ps->exceptions_per_area))
+ return;
- /*
- * Have we completely filled the current area ?
- */
- if (ps->current_committed == ps->exceptions_per_area) {
- ps->current_committed = 0;
- r = zero_area(ps, ps->current_area + 1);
- if (r)
- ps->valid = 0;
- }
+ /*
+ * If we completely filled the current area, then wipe the next one.
+ */
+ if ((ps->current_committed == ps->exceptions_per_area) &&
+ zero_disk_area(ps, ps->current_area + 1))
+ ps->valid = 0;
- for (i = 0; i < ps->callback_count; i++) {
- cb = ps->callbacks + i;
- cb->callback(cb->context, r == 0 ? 1 : 0);
- }
+ /*
+ * Commit exceptions to disk.
+ */
+ if (ps->valid && area_io(ps, WRITE))
+ ps->valid = 0;
+
+ /*
+ * Advance to the next area if this one is full.
+ */
+ if (ps->current_committed == ps->exceptions_per_area) {
+ ps->current_committed = 0;
+ ps->current_area++;
+ zero_memory_area(ps);
+ }
- ps->callback_count = 0;
+ for (i = 0; i < ps->callback_count; i++) {
+ cb = ps->callbacks + i;
+ cb->callback(cb->context, ps->valid);
}
+
+ ps->callback_count = 0;
}
static void persistent_drop(struct exception_store *store)
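
The new area_location() helper and the sector_div() change in persistent_prepare() both
encode the on-disk layout: chunk 0 holds the header, and each area is one metadata chunk
followed by exceptions_per_area exception chunks. A small worked example of that
arithmetic (illustrative only; exceptions_per_area = 3 and the starting value of
next_free are made up for the demonstration):

#include <stdio.h>

typedef unsigned long long chunk_t;

static chunk_t area_location(chunk_t exceptions_per_area, chunk_t area)
{
        return 1 + ((exceptions_per_area + 1) * area);
}

int main(void)
{
        chunk_t eper = 3, area, next_free = 0, stride = eper + 1;

        for (area = 0; area < 3; area++)
                printf("metadata for area %llu at chunk %llu\n",
                       area, area_location(eper, area));

        /* persistent_prepare(): skip over chunks that fall on a metadata chunk */
        for (int i = 0; i < 6; i++) {
                if (++next_free % stride == 1)
                        next_free++;
                printf("exception %d stored in chunk %llu\n", i, next_free);
        }
        return 0;
}

With exceptions_per_area = 3 the metadata chunks land at 1, 5, 9, ... and the exception
data fills the chunks in between (2-4, 6-8, ...), which is exactly what the
"(++ps->next_free % stride) == 1" test, now done via sector_div() for 64-bit chunk_t,
steps around.
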
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 4789c42d9a3a..2fd6d4450637 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -5,7 +5,7 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include <linux/device-mapper.h>
#include <linux/bio.h>
#include <linux/mempool.h>
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index b262c0042de3..777c948180f9 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -426,7 +426,7 @@ static int list_devices(struct dm_ioctl *param, size_t param_size)
old_nl->next = (uint32_t) ((void *) nl -
(void *) old_nl);
disk = dm_disk(hc->md);
- nl->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor));
+ nl->dev = huge_encode_dev(disk_devt(disk));
nl->next = 0;
strcpy(nl->name, hc->name);
@@ -539,7 +539,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
if (dm_suspended(md))
param->flags |= DM_SUSPEND_FLAG;
- param->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor));
+ param->dev = huge_encode_dev(disk_devt(disk));
/*
* Yes, this will be out of date by the time it gets back
@@ -548,7 +548,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
*/
param->open_count = dm_open_count(md);
- if (disk->policy)
+ if (get_disk_ro(disk))
param->flags |= DM_READONLY_FLAG;
param->event_nr = dm_get_event_nr(md);
@@ -988,9 +988,9 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size)
return r;
}
-static inline int get_mode(struct dm_ioctl *param)
+static inline fmode_t get_mode(struct dm_ioctl *param)
{
- int mode = FMODE_READ | FMODE_WRITE;
+ fmode_t mode = FMODE_READ | FMODE_WRITE;
if (param->flags & DM_READONLY_FLAG)
mode = FMODE_READ;
@@ -1131,7 +1131,7 @@ static void retrieve_deps(struct dm_table *table,
unsigned int count = 0;
struct list_head *tmp;
size_t len, needed;
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
struct dm_target_deps *deps;
deps = get_result_buffer(param, param_size, &len);
@@ -1157,7 +1157,7 @@ static void retrieve_deps(struct dm_table *table,
deps->count = count;
count = 0;
list_for_each_entry (dd, dm_table_get_devices(table), list)
- deps->dev[count++] = huge_encode_dev(dd->bdev->bd_dev);
+ deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev);
param->data_size = param->data_start + needed;
}
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 996802b8a452..3073618269ea 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -22,6 +22,7 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>
#include "dm.h"
@@ -268,6 +269,17 @@ static void push(struct list_head *jobs, struct kcopyd_job *job)
spin_unlock_irqrestore(&kc->job_lock, flags);
}
+
+static void push_head(struct list_head *jobs, struct kcopyd_job *job)
+{
+ unsigned long flags;
+ struct dm_kcopyd_client *kc = job->kc;
+
+ spin_lock_irqsave(&kc->job_lock, flags);
+ list_add(&job->list, jobs);
+ spin_unlock_irqrestore(&kc->job_lock, flags);
+}
+
/*
* These three functions process 1 item from the corresponding
* job list.
@@ -398,7 +410,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
* We couldn't service this job ATM, so
* push this job back onto the list.
*/
- push(jobs, job);
+ push_head(jobs, job);
break;
}
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 17753d80ad22..44042becad8a 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -5,12 +5,12 @@
*/
#include "dm.h"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
+#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "linear"
@@ -69,13 +69,25 @@ static void linear_dtr(struct dm_target *ti)
kfree(lc);
}
-static int linear_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
- struct linear_c *lc = (struct linear_c *) ti->private;
+ struct linear_c *lc = ti->private;
+
+ return lc->start + (bi_sector - ti->begin);
+}
+
+static void linear_map_bio(struct dm_target *ti, struct bio *bio)
+{
+ struct linear_c *lc = ti->private;
bio->bi_bdev = lc->dev->bdev;
- bio->bi_sector = lc->start + (bio->bi_sector - ti->begin);
+ bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
+}
+
+static int linear_map(struct dm_target *ti, struct bio *bio,
+ union map_info *map_context)
+{
+ linear_map_bio(ti, bio);
return DM_MAPIO_REMAPPED;
}
@@ -98,31 +110,38 @@ static int linear_status(struct dm_target *ti, status_type_t type,
return 0;
}
-static int linear_ioctl(struct dm_target *ti, struct inode *inode,
- struct file *filp, unsigned int cmd,
+static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
unsigned long arg)
{
struct linear_c *lc = (struct linear_c *) ti->private;
- struct block_device *bdev = lc->dev->bdev;
- struct file fake_file = {};
- struct dentry fake_dentry = {};
+ return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg);
+}
+
+static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct linear_c *lc = ti->private;
+ struct request_queue *q = bdev_get_queue(lc->dev->bdev);
+
+ if (!q->merge_bvec_fn)
+ return max_size;
- fake_file.f_mode = lc->dev->mode;
- fake_file.f_path.dentry = &fake_dentry;
- fake_dentry.d_inode = bdev->bd_inode;
+ bvm->bi_bdev = lc->dev->bdev;
+ bvm->bi_sector = linear_map_sector(ti, bvm->bi_sector);
- return blkdev_driver_ioctl(bdev->bd_inode, &fake_file, bdev->bd_disk, cmd, arg);
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
static struct target_type linear_target = {
.name = "linear",
- .version= {1, 0, 2},
+ .version= {1, 0, 3},
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
.map = linear_map,
.status = linear_status,
.ioctl = linear_ioctl,
+ .merge = linear_merge,
};
int __init dm_linear_init(void)
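
Both linear_map_bio() and the new linear_merge() above rely on the remap pulled out into
linear_map_sector(). A one-line worked example (the numbers are made up): with
ti->begin = 0 and lc->start = 384, a bio aimed at sector 1000 of the mapped device ends
up at sector 384 + (1000 - 0) = 1384 on lc->dev, and linear_merge() asks the underlying
queue's merge_bvec_fn about that translated sector rather than the original one, so the
answer reflects the real device the I/O will hit.
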
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 67a6f31b7fc3..a8c0fc79ca78 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -12,7 +12,7 @@
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
-#include "dm.h"
+#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "dirty region log"
@@ -831,7 +831,7 @@ static struct dm_dirty_log_type _disk_type = {
.status = disk_status,
};
-int __init dm_dirty_log_init(void)
+static int __init dm_dirty_log_init(void)
{
int r;
@@ -848,7 +848,7 @@ int __init dm_dirty_log_init(void)
return r;
}
-void __exit dm_dirty_log_exit(void)
+static void __exit dm_dirty_log_exit(void)
{
dm_dirty_log_type_unregister(&_disk_type);
dm_dirty_log_type_unregister(&_core_type);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 9f7302d4878d..4840733cd903 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -5,7 +5,8 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include <linux/device-mapper.h>
+
#include "dm-path-selector.h"
#include "dm-bio-list.h"
#include "dm-bio-record.h"
@@ -30,9 +31,11 @@ struct pgpath {
struct list_head list;
struct priority_group *pg; /* Owning PG */
+ unsigned is_active; /* Path status */
unsigned fail_count; /* Cumulative failure count */
struct dm_path path;
+ struct work_struct deactivate_path;
};
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -63,6 +66,7 @@ struct multipath {
const char *hw_handler_name;
struct work_struct activate_path;
+ struct pgpath *pgpath_to_activate;
unsigned nr_priority_groups;
struct list_head priority_groups;
unsigned pg_init_required; /* pg_init needs calling? */
@@ -111,6 +115,7 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
+static void deactivate_path(struct work_struct *work);
/*-----------------------------------------------
@@ -121,8 +126,10 @@ static struct pgpath *alloc_pgpath(void)
{
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
- if (pgpath)
- pgpath->path.is_active = 1;
+ if (pgpath) {
+ pgpath->is_active = 1;
+ INIT_WORK(&pgpath->deactivate_path, deactivate_path);
+ }
return pgpath;
}
@@ -132,6 +139,14 @@ static void free_pgpath(struct pgpath *pgpath)
kfree(pgpath);
}
+static void deactivate_path(struct work_struct *work)
+{
+ struct pgpath *pgpath =
+ container_of(work, struct pgpath, deactivate_path);
+
+ blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
+}
+
static struct priority_group *alloc_priority_group(void)
{
struct priority_group *pg;
@@ -146,11 +161,19 @@ static struct priority_group *alloc_priority_group(void)
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
+ unsigned long flags;
struct pgpath *pgpath, *tmp;
+ struct multipath *m = ti->private;
list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
list_del(&pgpath->list);
+ if (m->hw_handler_name)
+ scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
dm_put_device(ti, pgpath->path.dev);
+ spin_lock_irqsave(&m->lock, flags);
+ if (m->pgpath_to_activate == pgpath)
+ m->pgpath_to_activate = NULL;
+ spin_unlock_irqrestore(&m->lock, flags);
free_pgpath(pgpath);
}
}
@@ -418,6 +441,7 @@ static void process_queued_ios(struct work_struct *work)
__choose_pgpath(m);
pgpath = m->current_pgpath;
+ m->pgpath_to_activate = m->current_pgpath;
if ((pgpath && !m->queue_io) ||
(!pgpath && !m->queue_if_no_path))
@@ -525,8 +549,10 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
}
r = read_param(_params, shift(as), &ps_argc, &ti->error);
- if (r)
+ if (r) {
+ dm_put_path_selector(pst);
return -EINVAL;
+ }
r = pst->create(&pg->ps, ps_argc, as->argv);
if (r) {
@@ -546,16 +572,17 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
{
int r;
struct pgpath *p;
+ struct multipath *m = ti->private;
/* we need at least a path arg */
if (as->argc < 1) {
ti->error = "no device given";
- return NULL;
+ return ERR_PTR(-EINVAL);
}
p = alloc_pgpath();
if (!p)
- return NULL;
+ return ERR_PTR(-ENOMEM);
r = dm_get_device(ti, shift(as), ti->begin, ti->len,
dm_table_get_mode(ti->table), &p->path.dev);
@@ -564,6 +591,15 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
goto bad;
}
+ if (m->hw_handler_name) {
+ r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev),
+ m->hw_handler_name);
+ if (r < 0) {
+ dm_put_device(ti, p->path.dev);
+ goto bad;
+ }
+ }
+
r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
if (r) {
dm_put_device(ti, p->path.dev);
@@ -574,7 +610,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
bad:
free_pgpath(p);
- return NULL;
+ return ERR_PTR(r);
}
static struct priority_group *parse_priority_group(struct arg_set *as,
@@ -592,14 +628,14 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
if (as->argc < 2) {
as->argc = 0;
- ti->error = "not enough priority group aruments";
- return NULL;
+ ti->error = "not enough priority group arguments";
+ return ERR_PTR(-EINVAL);
}
pg = alloc_priority_group();
if (!pg) {
ti->error = "couldn't allocate priority group";
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
pg->m = m;
@@ -623,15 +659,19 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
struct pgpath *pgpath;
struct arg_set path_args;
- if (as->argc < nr_params)
+ if (as->argc < nr_params) {
+ ti->error = "not enough path parameters";
goto bad;
+ }
path_args.argc = nr_params;
path_args.argv = as->argv;
pgpath = parse_path(&path_args, &pg->ps, ti);
- if (!pgpath)
+ if (IS_ERR(pgpath)) {
+ r = PTR_ERR(pgpath);
goto bad;
+ }
pgpath->pg = pg;
list_add_tail(&pgpath->list, &pg->pgpaths);
@@ -642,7 +682,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
bad:
free_priority_group(pg, ti);
- return NULL;
+ return ERR_PTR(r);
}
static int parse_hw_handler(struct arg_set *as, struct multipath *m)
@@ -761,8 +801,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
struct priority_group *pg;
pg = parse_priority_group(&as, m);
- if (!pg) {
- r = -EINVAL;
+ if (IS_ERR(pg)) {
+ r = PTR_ERR(pg);
goto bad;
}
@@ -810,7 +850,7 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
dm_bio_record(&mpio->details, bio);
map_context->ptr = mpio;
- bio->bi_rw |= (1 << BIO_RW_FAILFAST);
+ bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
r = map_io(m, bio, mpio, 0);
if (r < 0 || r == DM_MAPIO_REQUEUE)
mempool_free(mpio, m->mpio_pool);
@@ -828,13 +868,13 @@ static int fail_path(struct pgpath *pgpath)
spin_lock_irqsave(&m->lock, flags);
- if (!pgpath->path.is_active)
+ if (!pgpath->is_active)
goto out;
DMWARN("Failing path %s.", pgpath->path.dev->name);
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
- pgpath->path.is_active = 0;
+ pgpath->is_active = 0;
pgpath->fail_count++;
m->nr_valid_paths--;
@@ -846,6 +886,7 @@ static int fail_path(struct pgpath *pgpath)
pgpath->path.dev->name, m->nr_valid_paths);
queue_work(kmultipathd, &m->trigger_event);
+ queue_work(kmultipathd, &pgpath->deactivate_path);
out:
spin_unlock_irqrestore(&m->lock, flags);
@@ -864,10 +905,10 @@ static int reinstate_path(struct pgpath *pgpath)
spin_lock_irqsave(&m->lock, flags);
- if (pgpath->path.is_active)
+ if (pgpath->is_active)
goto out;
- if (!pgpath->pg->ps.type) {
+ if (!pgpath->pg->ps.type->reinstate_path) {
DMWARN("Reinstate path not supported by path selector %s",
pgpath->pg->ps.type->name);
r = -EINVAL;
@@ -878,7 +919,7 @@ static int reinstate_path(struct pgpath *pgpath)
if (r)
goto out;
- pgpath->path.is_active = 1;
+ pgpath->is_active = 1;
m->current_pgpath = NULL;
if (!m->nr_valid_paths++ && m->queue_size)
@@ -1076,8 +1117,15 @@ static void activate_path(struct work_struct *work)
int ret;
struct multipath *m =
container_of(work, struct multipath, activate_path);
- struct dm_path *path = &m->current_pgpath->path;
+ struct dm_path *path;
+ unsigned long flags;
+ spin_lock_irqsave(&m->lock, flags);
+ path = &m->pgpath_to_activate->path;
+ m->pgpath_to_activate = NULL;
+ spin_unlock_irqrestore(&m->lock, flags);
+ if (!path)
+ return;
ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
pg_init_done(path, ret);
}
@@ -1259,7 +1307,7 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
list_for_each_entry(p, &pg->pgpaths, list) {
DMEMIT("%s %s %u ", p->path.dev->name,
- p->path.is_active ? "A" : "F",
+ p->is_active ? "A" : "F",
p->fail_count);
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps,
@@ -1348,19 +1396,15 @@ error:
return -EINVAL;
}
-static int multipath_ioctl(struct dm_target *ti, struct inode *inode,
- struct file *filp, unsigned int cmd,
+static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
unsigned long arg)
{
struct multipath *m = (struct multipath *) ti->private;
struct block_device *bdev = NULL;
+ fmode_t mode = 0;
unsigned long flags;
- struct file fake_file = {};
- struct dentry fake_dentry = {};
int r = 0;
- fake_file.f_path.dentry = &fake_dentry;
-
spin_lock_irqsave(&m->lock, flags);
if (!m->current_pgpath)
@@ -1368,8 +1412,7 @@ static int multipath_ioctl(struct dm_target *ti, struct inode *inode,
if (m->current_pgpath) {
bdev = m->current_pgpath->path.dev->bdev;
- fake_dentry.d_inode = bdev->bd_inode;
- fake_file.f_mode = m->current_pgpath->path.dev->mode;
+ mode = m->current_pgpath->path.dev->mode;
}
if (m->queue_io)
@@ -1379,8 +1422,7 @@ static int multipath_ioctl(struct dm_target *ti, struct inode *inode,
spin_unlock_irqrestore(&m->lock, flags);
- return r ? : blkdev_driver_ioctl(bdev->bd_inode, &fake_file,
- bdev->bd_disk, cmd, arg);
+ return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
/*-----------------------------------------------------------------
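
parse_path() and parse_priority_group() above switch from returning NULL on failure to
the kernel's ERR_PTR() convention, so multipath_ctr() can propagate the real errno
instead of collapsing every failure to -EINVAL. A minimal user-space model of that
convention (the real helpers live in <linux/err.h>; these simplified copies and
parse_thing() only illustrate how a small negative errno is encoded in an invalid
pointer value):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO       4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        /* the top MAX_ERRNO pointer values are never valid addresses */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* stand-in for parse_path(): fail with a specific errno, or succeed */
static void *parse_thing(int fail)
{
        static int object;

        if (fail)
                return ERR_PTR(-EINVAL);
        return &object;
}

int main(void)
{
        void *p = parse_thing(1);

        if (IS_ERR(p))
                printf("parse failed: %ld\n", PTR_ERR(p));
        return 0;
}
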
diff --git a/drivers/md/dm-mpath.h b/drivers/md/dm-mpath.h
index c198b856a452..e230f7196259 100644
--- a/drivers/md/dm-mpath.h
+++ b/drivers/md/dm-mpath.h
@@ -13,8 +13,6 @@ struct dm_dev;
struct dm_path {
struct dm_dev *dev; /* Read-only */
- unsigned is_active; /* Read-only */
-
void *pscontext; /* For path-selector use */
};
diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c
index ca1bb636a3e4..96ea226155b1 100644
--- a/drivers/md/dm-path-selector.c
+++ b/drivers/md/dm-path-selector.c
@@ -9,7 +9,8 @@
* Path selector registration.
*/
-#include "dm.h"
+#include <linux/device-mapper.h>
+
#include "dm-path-selector.h"
#include <linux/slab.h>
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ff05fe893083..92dcc06832a4 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1,30 +1,30 @@
/*
* Copyright (C) 2003 Sistina Software Limited.
+ * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
-#include "dm.h"
#include "dm-bio-list.h"
#include "dm-bio-record.h"
-#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/vmalloc.h>
#include <linux/workqueue.h>
-#include <linux/log2.h>
-#include <linux/hardirq.h>
+#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
+#include <linux/dm-region-hash.h>
#define DM_MSG_PREFIX "raid1"
+
+#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
#define DM_IO_PAGES 64
+#define DM_KCOPYD_PAGES 64
#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -32,87 +32,6 @@
static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
/*-----------------------------------------------------------------
- * Region hash
- *
- * The mirror splits itself up into discrete regions. Each
- * region can be in one of three states: clean, dirty,
- * nosync. There is no need to put clean regions in the hash.
- *
- * In addition to being present in the hash table a region _may_
- * be present on one of three lists.
- *
- * clean_regions: Regions on this list have no io pending to
- * them, they are in sync, we are no longer interested in them,
- * they are dull. rh_update_states() will remove them from the
- * hash table.
- *
- * quiesced_regions: These regions have been spun down, ready
- * for recovery. rh_recovery_start() will remove regions from
- * this list and hand them to kmirrord, which will schedule the
- * recovery io with kcopyd.
- *
- * recovered_regions: Regions that kcopyd has successfully
- * recovered. rh_update_states() will now schedule any delayed
- * io, up the recovery_count, and remove the region from the
- * hash.
- *
- * There are 2 locks:
- * A rw spin lock 'hash_lock' protects just the hash table,
- * this is never held in write mode from interrupt context,
- * which I believe means that we only have to disable irqs when
- * doing a write lock.
- *
- * An ordinary spin lock 'region_lock' that protects the three
- * lists in the region_hash, with the 'state', 'list' and
- * 'bhs_delayed' fields of the regions. This is used from irq
- * context, so all other uses will have to suspend local irqs.
- *---------------------------------------------------------------*/
-struct mirror_set;
-struct region_hash {
- struct mirror_set *ms;
- uint32_t region_size;
- unsigned region_shift;
-
- /* holds persistent region state */
- struct dm_dirty_log *log;
-
- /* hash table */
- rwlock_t hash_lock;
- mempool_t *region_pool;
- unsigned int mask;
- unsigned int nr_buckets;
- struct list_head *buckets;
-
- spinlock_t region_lock;
- atomic_t recovery_in_flight;
- struct semaphore recovery_count;
- struct list_head clean_regions;
- struct list_head quiesced_regions;
- struct list_head recovered_regions;
- struct list_head failed_recovered_regions;
-};
-
-enum {
- RH_CLEAN,
- RH_DIRTY,
- RH_NOSYNC,
- RH_RECOVERING
-};
-
-struct region {
- struct region_hash *rh; /* FIXME: can we get rid of this ? */
- region_t key;
- int state;
-
- struct list_head hash_list;
- struct list_head list;
-
- atomic_t pending;
- struct bio_list delayed_bios;
-};
-
-
-/*-----------------------------------------------------------------
* Mirror set structures.
*---------------------------------------------------------------*/
enum dm_raid1_error {
@@ -132,8 +51,7 @@ struct mirror {
struct mirror_set {
struct dm_target *ti;
struct list_head list;
- struct region_hash rh;
- struct dm_kcopyd_client *kcopyd_client;
+
uint64_t features;
spinlock_t lock; /* protects the lists */
@@ -141,6 +59,8 @@ struct mirror_set {
struct bio_list writes;
struct bio_list failures;
+ struct dm_region_hash *rh;
+ struct dm_kcopyd_client *kcopyd_client;
struct dm_io_client *io_client;
mempool_t *read_record_pool;
@@ -159,25 +79,14 @@ struct mirror_set {
struct work_struct trigger_event;
- unsigned int nr_mirrors;
+ unsigned nr_mirrors;
struct mirror mirror[0];
};
-/*
- * Conversion fns
- */
-static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
-{
- return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
-}
-
-static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
+static void wakeup_mirrord(void *context)
{
- return region << rh->region_shift;
-}
+ struct mirror_set *ms = context;
-static void wake(struct mirror_set *ms)
-{
queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}
@@ -186,7 +95,7 @@ static void delayed_wake_fn(unsigned long data)
struct mirror_set *ms = (struct mirror_set *) data;
clear_bit(0, &ms->timer_pending);
- wake(ms);
+ wakeup_mirrord(ms);
}
static void delayed_wake(struct mirror_set *ms)
@@ -200,473 +109,34 @@ static void delayed_wake(struct mirror_set *ms)
add_timer(&ms->timer);
}
-/* FIXME move this */
-static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
-
-#define MIN_REGIONS 64
-#define MAX_RECOVERY 1
-static int rh_init(struct region_hash *rh, struct mirror_set *ms,
- struct dm_dirty_log *log, uint32_t region_size,
- region_t nr_regions)
-{
- unsigned int nr_buckets, max_buckets;
- size_t i;
-
- /*
- * Calculate a suitable number of buckets for our hash
- * table.
- */
- max_buckets = nr_regions >> 6;
- for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
- ;
- nr_buckets >>= 1;
-
- rh->ms = ms;
- rh->log = log;
- rh->region_size = region_size;
- rh->region_shift = ffs(region_size) - 1;
- rwlock_init(&rh->hash_lock);
- rh->mask = nr_buckets - 1;
- rh->nr_buckets = nr_buckets;
-
- rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
- if (!rh->buckets) {
- DMERR("unable to allocate region hash memory");
- return -ENOMEM;
- }
-
- for (i = 0; i < nr_buckets; i++)
- INIT_LIST_HEAD(rh->buckets + i);
-
- spin_lock_init(&rh->region_lock);
- sema_init(&rh->recovery_count, 0);
- atomic_set(&rh->recovery_in_flight, 0);
- INIT_LIST_HEAD(&rh->clean_regions);
- INIT_LIST_HEAD(&rh->quiesced_regions);
- INIT_LIST_HEAD(&rh->recovered_regions);
- INIT_LIST_HEAD(&rh->failed_recovered_regions);
-
- rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
- sizeof(struct region));
- if (!rh->region_pool) {
- vfree(rh->buckets);
- rh->buckets = NULL;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void rh_exit(struct region_hash *rh)
-{
- unsigned int h;
- struct region *reg, *nreg;
-
- BUG_ON(!list_empty(&rh->quiesced_regions));
- for (h = 0; h < rh->nr_buckets; h++) {
- list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
- BUG_ON(atomic_read(&reg->pending));
- mempool_free(reg, rh->region_pool);
- }
- }
-
- if (rh->log)
- dm_dirty_log_destroy(rh->log);
- if (rh->region_pool)
- mempool_destroy(rh->region_pool);
- vfree(rh->buckets);
-}
-
-#define RH_HASH_MULT 2654435387U
-
-static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
-{
- return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
-}
-
-static struct region *__rh_lookup(struct region_hash *rh, region_t region)
-{
- struct region *reg;
-
- list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
- if (reg->key == region)
- return reg;
-
- return NULL;
-}
-
-static void __rh_insert(struct region_hash *rh, struct region *reg)
-{
- unsigned int h = rh_hash(rh, reg->key);
- list_add(&reg->hash_list, rh->buckets + h);
-}
-
-static struct region *__rh_alloc(struct region_hash *rh, region_t region)
-{
- struct region *reg, *nreg;
-
- read_unlock(&rh->hash_lock);
- nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
- if (unlikely(!nreg))
- nreg = kmalloc(sizeof(struct region), GFP_NOIO);
- nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
- RH_CLEAN : RH_NOSYNC;
- nreg->rh = rh;
- nreg->key = region;
-
- INIT_LIST_HEAD(&nreg->list);
-
- atomic_set(&nreg->pending, 0);
- bio_list_init(&nreg->delayed_bios);
- write_lock_irq(&rh->hash_lock);
-
- reg = __rh_lookup(rh, region);
- if (reg)
- /* we lost the race */
- mempool_free(nreg, rh->region_pool);
-
- else {
- __rh_insert(rh, nreg);
- if (nreg->state == RH_CLEAN) {
- spin_lock(&rh->region_lock);
- list_add(&nreg->list, &rh->clean_regions);
- spin_unlock(&rh->region_lock);
- }
- reg = nreg;
- }
- write_unlock_irq(&rh->hash_lock);
- read_lock(&rh->hash_lock);
-
- return reg;
-}
-
-static inline struct region *__rh_find(struct region_hash *rh, region_t region)
-{
- struct region *reg;
-
- reg = __rh_lookup(rh, region);
- if (!reg)
- reg = __rh_alloc(rh, region);
-
- return reg;
-}
-
-static int rh_state(struct region_hash *rh, region_t region, int may_block)
-{
- int r;
- struct region *reg;
-
- read_lock(&rh->hash_lock);
- reg = __rh_lookup(rh, region);
- read_unlock(&rh->hash_lock);
-
- if (reg)
- return reg->state;
-
- /*
- * The region wasn't in the hash, so we fall back to the
- * dirty log.
- */
- r = rh->log->type->in_sync(rh->log, region, may_block);
-
- /*
- * Any error from the dirty log (eg. -EWOULDBLOCK) gets
- * taken as a RH_NOSYNC
- */
- return r == 1 ? RH_CLEAN : RH_NOSYNC;
-}
-
-static inline int rh_in_sync(struct region_hash *rh,
- region_t region, int may_block)
+static void wakeup_all_recovery_waiters(void *context)
{
- int state = rh_state(rh, region, may_block);
- return state == RH_CLEAN || state == RH_DIRTY;
+ wake_up_all(&_kmirrord_recovery_stopped);
}
-static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
-{
- struct bio *bio;
-
- while ((bio = bio_list_pop(bio_list))) {
- queue_bio(ms, bio, WRITE);
- }
-}
-
-static void complete_resync_work(struct region *reg, int success)
-{
- struct region_hash *rh = reg->rh;
-
- rh->log->type->set_region_sync(rh->log, reg->key, success);
-
- /*
- * Dispatch the bios before we call 'wake_up_all'.
- * This is important because if we are suspending,
- * we want to know that recovery is complete and
- * the work queue is flushed. If we wake_up_all
- * before we dispatch_bios (queue bios and call wake()),
- * then we risk suspending before the work queue
- * has been properly flushed.
- */
- dispatch_bios(rh->ms, &reg->delayed_bios);
- if (atomic_dec_and_test(&rh->recovery_in_flight))
- wake_up_all(&_kmirrord_recovery_stopped);
- up(&rh->recovery_count);
-}
-
-static void rh_update_states(struct region_hash *rh)
-{
- struct region *reg, *next;
-
- LIST_HEAD(clean);
- LIST_HEAD(recovered);
- LIST_HEAD(failed_recovered);
-
- /*
- * Quickly grab the lists.
- */
- write_lock_irq(&rh->hash_lock);
- spin_lock(&rh->region_lock);
- if (!list_empty(&rh->clean_regions)) {
- list_splice_init(&rh->clean_regions, &clean);
-
- list_for_each_entry(reg, &clean, list)
- list_del(&reg->hash_list);
- }
-
- if (!list_empty(&rh->recovered_regions)) {
- list_splice_init(&rh->recovered_regions, &recovered);
-
- list_for_each_entry (reg, &recovered, list)
- list_del(&reg->hash_list);
- }
-
- if (!list_empty(&rh->failed_recovered_regions)) {
- list_splice_init(&rh->failed_recovered_regions,
- &failed_recovered);
-
- list_for_each_entry(reg, &failed_recovered, list)
- list_del(&reg->hash_list);
- }
-
- spin_unlock(&rh->region_lock);
- write_unlock_irq(&rh->hash_lock);
-
- /*
- * All the regions on the recovered and clean lists have
- * now been pulled out of the system, so no need to do
- * any more locking.
- */
- list_for_each_entry_safe (reg, next, &recovered, list) {
- rh->log->type->clear_region(rh->log, reg->key);
- complete_resync_work(reg, 1);
- mempool_free(reg, rh->region_pool);
- }
-
- list_for_each_entry_safe(reg, next, &failed_recovered, list) {
- complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
- mempool_free(reg, rh->region_pool);
- }
-
- list_for_each_entry_safe(reg, next, &clean, list) {
- rh->log->type->clear_region(rh->log, reg->key);
- mempool_free(reg, rh->region_pool);
- }
-
- rh->log->type->flush(rh->log);
-}
-
-static void rh_inc(struct region_hash *rh, region_t region)
-{
- struct region *reg;
-
- read_lock(&rh->hash_lock);
- reg = __rh_find(rh, region);
-
- spin_lock_irq(&rh->region_lock);
- atomic_inc(&reg->pending);
-
- if (reg->state == RH_CLEAN) {
- reg->state = RH_DIRTY;
- list_del_init(&reg->list); /* take off the clean list */
- spin_unlock_irq(&rh->region_lock);
-
- rh->log->type->mark_region(rh->log, reg->key);
- } else
- spin_unlock_irq(&rh->region_lock);
-
-
- read_unlock(&rh->hash_lock);
-}
-
-static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
-{
- struct bio *bio;
-
- for (bio = bios->head; bio; bio = bio->bi_next)
- rh_inc(rh, bio_to_region(rh, bio));
-}
-
-static void rh_dec(struct region_hash *rh, region_t region)
+static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
unsigned long flags;
- struct region *reg;
int should_wake = 0;
+ struct bio_list *bl;
- read_lock(&rh->hash_lock);
- reg = __rh_lookup(rh, region);
- read_unlock(&rh->hash_lock);
-
- spin_lock_irqsave(&rh->region_lock, flags);
- if (atomic_dec_and_test(&reg->pending)) {
- /*
- * There is no pending I/O for this region.
- * We can move the region to corresponding list for next action.
- * At this point, the region is not yet connected to any list.
- *
- * If the state is RH_NOSYNC, the region should be kept off
- * from clean list.
- * The hash entry for RH_NOSYNC will remain in memory
- * until the region is recovered or the map is reloaded.
- */
-
- /* do nothing for RH_NOSYNC */
- if (reg->state == RH_RECOVERING) {
- list_add_tail(&reg->list, &rh->quiesced_regions);
- } else if (reg->state == RH_DIRTY) {
- reg->state = RH_CLEAN;
- list_add(&reg->list, &rh->clean_regions);
- }
- should_wake = 1;
- }
- spin_unlock_irqrestore(&rh->region_lock, flags);
+ bl = (rw == WRITE) ? &ms->writes : &ms->reads;
+ spin_lock_irqsave(&ms->lock, flags);
+ should_wake = !(bl->head);
+ bio_list_add(bl, bio);
+ spin_unlock_irqrestore(&ms->lock, flags);
if (should_wake)
- wake(rh->ms);
+ wakeup_mirrord(ms);
}
-/*
- * Starts quiescing a region in preparation for recovery.
- */
-static int __rh_recovery_prepare(struct region_hash *rh)
+static void dispatch_bios(void *context, struct bio_list *bio_list)
{
- int r;
- struct region *reg;
- region_t region;
-
- /*
- * Ask the dirty log what's next.
- */
- r = rh->log->type->get_resync_work(rh->log, &region);
- if (r <= 0)
- return r;
-
- /*
- * Get this region, and start it quiescing by setting the
- * recovering flag.
- */
- read_lock(&rh->hash_lock);
- reg = __rh_find(rh, region);
- read_unlock(&rh->hash_lock);
-
- spin_lock_irq(&rh->region_lock);
- reg->state = RH_RECOVERING;
-
- /* Already quiesced ? */
- if (atomic_read(&reg->pending))
- list_del_init(&reg->list);
- else
- list_move(&reg->list, &rh->quiesced_regions);
-
- spin_unlock_irq(&rh->region_lock);
-
- return 1;
-}
-
-static void rh_recovery_prepare(struct region_hash *rh)
-{
- /* Extra reference to avoid race with rh_stop_recovery */
- atomic_inc(&rh->recovery_in_flight);
-
- while (!down_trylock(&rh->recovery_count)) {
- atomic_inc(&rh->recovery_in_flight);
- if (__rh_recovery_prepare(rh) <= 0) {
- atomic_dec(&rh->recovery_in_flight);
- up(&rh->recovery_count);
- break;
- }
- }
-
- /* Drop the extra reference */
- if (atomic_dec_and_test(&rh->recovery_in_flight))
- wake_up_all(&_kmirrord_recovery_stopped);
-}
-
-/*
- * Returns any quiesced regions.
- */
-static struct region *rh_recovery_start(struct region_hash *rh)
-{
- struct region *reg = NULL;
-
- spin_lock_irq(&rh->region_lock);
- if (!list_empty(&rh->quiesced_regions)) {
- reg = list_entry(rh->quiesced_regions.next,
- struct region, list);
- list_del_init(&reg->list); /* remove from the quiesced list */
- }
- spin_unlock_irq(&rh->region_lock);
-
- return reg;
-}
-
-static void rh_recovery_end(struct region *reg, int success)
-{
- struct region_hash *rh = reg->rh;
-
- spin_lock_irq(&rh->region_lock);
- if (success)
- list_add(&reg->list, &reg->rh->recovered_regions);
- else {
- reg->state = RH_NOSYNC;
- list_add(&reg->list, &reg->rh->failed_recovered_regions);
- }
- spin_unlock_irq(&rh->region_lock);
-
- wake(rh->ms);
-}
-
-static int rh_flush(struct region_hash *rh)
-{
- return rh->log->type->flush(rh->log);
-}
-
-static void rh_delay(struct region_hash *rh, struct bio *bio)
-{
- struct region *reg;
-
- read_lock(&rh->hash_lock);
- reg = __rh_find(rh, bio_to_region(rh, bio));
- bio_list_add(&reg->delayed_bios, bio);
- read_unlock(&rh->hash_lock);
-}
-
-static void rh_stop_recovery(struct region_hash *rh)
-{
- int i;
-
- /* wait for any recovering regions */
- for (i = 0; i < MAX_RECOVERY; i++)
- down(&rh->recovery_count);
-}
-
-static void rh_start_recovery(struct region_hash *rh)
-{
- int i;
-
- for (i = 0; i < MAX_RECOVERY; i++)
- up(&rh->recovery_count);
+ struct mirror_set *ms = context;
+ struct bio *bio;
- wake(rh->ms);
+ while ((bio = bio_list_pop(bio_list)))
+ queue_bio(ms, bio, WRITE);
}
#define MIN_READ_RECORDS 20
@@ -776,8 +246,8 @@ out:
static void recovery_complete(int read_err, unsigned long write_err,
void *context)
{
- struct region *reg = (struct region *)context;
- struct mirror_set *ms = reg->rh->ms;
+ struct dm_region *reg = context;
+ struct mirror_set *ms = dm_rh_region_context(reg);
int m, bit = 0;
if (read_err) {
@@ -803,31 +273,33 @@ static void recovery_complete(int read_err, unsigned long write_err,
}
}
- rh_recovery_end(reg, !(read_err || write_err));
+ dm_rh_recovery_end(reg, !(read_err || write_err));
}
-static int recover(struct mirror_set *ms, struct region *reg)
+static int recover(struct mirror_set *ms, struct dm_region *reg)
{
int r;
- unsigned int i;
+ unsigned i;
struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
struct mirror *m;
unsigned long flags = 0;
+ region_t key = dm_rh_get_region_key(reg);
+ sector_t region_size = dm_rh_get_region_size(ms->rh);
/* fill in the source */
m = get_default_mirror(ms);
from.bdev = m->dev->bdev;
- from.sector = m->offset + region_to_sector(reg->rh, reg->key);
- if (reg->key == (ms->nr_regions - 1)) {
+ from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
+ if (key == (ms->nr_regions - 1)) {
/*
* The final region may be smaller than
* region_size.
*/
- from.count = ms->ti->len & (reg->rh->region_size - 1);
+ from.count = ms->ti->len & (region_size - 1);
if (!from.count)
- from.count = reg->rh->region_size;
+ from.count = region_size;
} else
- from.count = reg->rh->region_size;
+ from.count = region_size;
/* fill in the destinations */
for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
@@ -836,13 +308,15 @@ static int recover(struct mirror_set *ms, struct region *reg)
m = ms->mirror + i;
dest->bdev = m->dev->bdev;
- dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
+ dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
dest->count = from.count;
dest++;
}
/* hand to kcopyd */
- set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
+ if (!errors_handled(ms))
+ set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
+
r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
flags, recovery_complete, reg);
@@ -851,22 +325,22 @@ static int recover(struct mirror_set *ms, struct region *reg)
static void do_recovery(struct mirror_set *ms)
{
+ struct dm_region *reg;
+ struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
int r;
- struct region *reg;
- struct dm_dirty_log *log = ms->rh.log;
/*
* Start quiescing some regions.
*/
- rh_recovery_prepare(&ms->rh);
+ dm_rh_recovery_prepare(ms->rh);
/*
* Copy any already quiesced regions.
*/
- while ((reg = rh_recovery_start(&ms->rh))) {
+ while ((reg = dm_rh_recovery_start(ms->rh))) {
r = recover(ms, reg);
if (r)
- rh_recovery_end(reg, 0);
+ dm_rh_recovery_end(reg, 0);
}
/*
@@ -907,9 +381,10 @@ static int default_ok(struct mirror *m)
static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
- region_t region = bio_to_region(&ms->rh, bio);
+ struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
+ region_t region = dm_rh_bio_to_region(ms->rh, bio);
- if (ms->rh.log->type->in_sync(ms->rh.log, region, 0))
+ if (log->type->in_sync(log, region, 0))
return choose_mirror(ms, bio->bi_sector) ? 1 : 0;
return 0;
@@ -983,7 +458,14 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
map_region(&io, m, bio);
bio_set_m(bio, m);
- (void) dm_io(&io_req, 1, &io, NULL);
+ BUG_ON(dm_io(&io_req, 1, &io, NULL));
+}
+
+static inline int region_in_sync(struct mirror_set *ms, region_t region,
+ int may_block)
+{
+ int state = dm_rh_get_state(ms->rh, region, may_block);
+ return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}
static void do_reads(struct mirror_set *ms, struct bio_list *reads)
@@ -993,13 +475,13 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
struct mirror *m;
while ((bio = bio_list_pop(reads))) {
- region = bio_to_region(&ms->rh, bio);
+ region = dm_rh_bio_to_region(ms->rh, bio);
m = get_default_mirror(ms);
/*
* We can only read balance if the region is in sync.
*/
- if (likely(rh_in_sync(&ms->rh, region, 1)))
+ if (likely(region_in_sync(ms, region, 1)))
m = choose_mirror(ms, bio->bi_sector);
else if (m && atomic_read(&m->error_count))
m = NULL;
@@ -1022,57 +504,6 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
* NOSYNC: increment pending, just write to the default mirror
*---------------------------------------------------------------*/
-/* __bio_mark_nosync
- * @ms
- * @bio
- * @done
- * @error
- *
- * The bio was written on some mirror(s) but failed on other mirror(s).
- * We can successfully endio the bio but should avoid the region being
- * marked clean by setting the state RH_NOSYNC.
- *
- * This function is _not_ safe in interrupt context!
- */
-static void __bio_mark_nosync(struct mirror_set *ms,
- struct bio *bio, unsigned done, int error)
-{
- unsigned long flags;
- struct region_hash *rh = &ms->rh;
- struct dm_dirty_log *log = ms->rh.log;
- struct region *reg;
- region_t region = bio_to_region(rh, bio);
- int recovering = 0;
-
- /* We must inform the log that the sync count has changed. */
- log->type->set_region_sync(log, region, 0);
- ms->in_sync = 0;
-
- read_lock(&rh->hash_lock);
- reg = __rh_find(rh, region);
- read_unlock(&rh->hash_lock);
-
- /* region hash entry should exist because write was in-flight */
- BUG_ON(!reg);
- BUG_ON(!list_empty(&reg->list));
-
- spin_lock_irqsave(&rh->region_lock, flags);
- /*
- * Possible cases:
- * 1) RH_DIRTY
- * 2) RH_NOSYNC: was dirty, other preceeding writes failed
- * 3) RH_RECOVERING: flushing pending writes
- * Either case, the region should have not been connected to list.
- */
- recovering = (reg->state == RH_RECOVERING);
- reg->state = RH_NOSYNC;
- BUG_ON(!list_empty(&reg->list));
- spin_unlock_irqrestore(&rh->region_lock, flags);
-
- bio_endio(bio, error);
- if (recovering)
- complete_resync_work(reg, 0);
-}
static void write_callback(unsigned long error, void *context)
{
@@ -1117,7 +548,7 @@ static void write_callback(unsigned long error, void *context)
bio_list_add(&ms->failures, bio);
spin_unlock_irqrestore(&ms->lock, flags);
if (should_wake)
- wake(ms);
+ wakeup_mirrord(ms);
return;
}
out:
@@ -1147,7 +578,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
*/
bio_set_m(bio, get_default_mirror(ms));
- (void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
+ BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
@@ -1167,18 +598,19 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
bio_list_init(&recover);
while ((bio = bio_list_pop(writes))) {
- state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
+ state = dm_rh_get_state(ms->rh,
+ dm_rh_bio_to_region(ms->rh, bio), 1);
switch (state) {
- case RH_CLEAN:
- case RH_DIRTY:
+ case DM_RH_CLEAN:
+ case DM_RH_DIRTY:
this_list = &sync;
break;
- case RH_NOSYNC:
+ case DM_RH_NOSYNC:
this_list = &nosync;
break;
- case RH_RECOVERING:
+ case DM_RH_RECOVERING:
this_list = &recover;
break;
}
@@ -1191,9 +623,9 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
* be written to (writes to recover regions are going to
* be delayed).
*/
- rh_inc_pending(&ms->rh, &sync);
- rh_inc_pending(&ms->rh, &nosync);
- ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;
+ dm_rh_inc_pending(ms->rh, &sync);
+ dm_rh_inc_pending(ms->rh, &nosync);
+ ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0;
/*
* Dispatch io.
@@ -1202,13 +634,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
spin_lock_irq(&ms->lock);
bio_list_merge(&ms->failures, &sync);
spin_unlock_irq(&ms->lock);
- wake(ms);
+ wakeup_mirrord(ms);
} else
while ((bio = bio_list_pop(&sync)))
do_write(ms, bio);
while ((bio = bio_list_pop(&recover)))
- rh_delay(&ms->rh, bio);
+ dm_rh_delay(ms->rh, bio);
while ((bio = bio_list_pop(&nosync))) {
map_bio(get_default_mirror(ms), bio);
@@ -1225,7 +657,8 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
if (!ms->log_failure) {
- while ((bio = bio_list_pop(failures)))
- __bio_mark_nosync(ms, bio, bio->bi_size, 0);
+ while ((bio = bio_list_pop(failures))) {
+ ms->in_sync = 0;
+ dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
+ }
return;
}
@@ -1278,8 +711,8 @@ static void trigger_event(struct work_struct *work)
*---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
- struct mirror_set *ms =container_of(work, struct mirror_set,
- kmirrord_work);
+ struct mirror_set *ms = container_of(work, struct mirror_set,
+ kmirrord_work);
struct bio_list reads, writes, failures;
unsigned long flags;
@@ -1292,7 +725,7 @@ static void do_mirror(struct work_struct *work)
bio_list_init(&ms->failures);
spin_unlock_irqrestore(&ms->lock, flags);
- rh_update_states(&ms->rh);
+ dm_rh_update_states(ms->rh, errors_handled(ms));
do_recovery(ms);
do_reads(ms, &reads);
do_writes(ms, &writes);
@@ -1301,7 +734,6 @@ static void do_mirror(struct work_struct *work)
dm_table_unplug_all(ms->ti->table);
}
-
/*-----------------------------------------------------------------
* Target functions
*---------------------------------------------------------------*/
@@ -1313,9 +745,6 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
size_t len;
struct mirror_set *ms = NULL;
- if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
- return NULL;
-
len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
ms = kzalloc(len, GFP_KERNEL);
@@ -1351,7 +780,11 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
return NULL;
}
- if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
+ ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
+ wakeup_all_recovery_waiters,
+ ms->ti->begin, MAX_RECOVERY,
+ dl, region_size, ms->nr_regions);
+ if (IS_ERR(ms->rh)) {
ti->error = "Error creating dirty region hash";
dm_io_client_destroy(ms->io_client);
mempool_destroy(ms->read_record_pool);
@@ -1369,7 +802,7 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti,
dm_put_device(ti, ms->mirror[m].dev);
dm_io_client_destroy(ms->io_client);
- rh_exit(&ms->rh);
+ dm_region_hash_destroy(ms->rh);
mempool_destroy(ms->read_record_pool);
kfree(ms);
}
@@ -1409,10 +842,10 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
* Create dirty log: log_type #log_params <log_params>
*/
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
- unsigned int argc, char **argv,
- unsigned int *args_used)
+ unsigned argc, char **argv,
+ unsigned *args_used)
{
- unsigned int param_count;
+ unsigned param_count;
struct dm_dirty_log *dl;
if (argc < 2) {
@@ -1543,7 +976,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->private = ms;
- ti->split_io = ms->rh.region_size;
+ ti->split_io = dm_rh_get_region_size(ms->rh);
ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
if (!ms->kmirrord_wq) {
@@ -1578,11 +1011,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto err_destroy_wq;
}
- r = dm_kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
+ r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
if (r)
goto err_destroy_wq;
- wake(ms);
+ wakeup_mirrord(ms);
return 0;
err_destroy_wq:
@@ -1603,22 +1036,6 @@ static void mirror_dtr(struct dm_target *ti)
free_context(ms, ti, ms->nr_mirrors);
}
-static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
-{
- unsigned long flags;
- int should_wake = 0;
- struct bio_list *bl;
-
- bl = (rw == WRITE) ? &ms->writes : &ms->reads;
- spin_lock_irqsave(&ms->lock, flags);
- should_wake = !(bl->head);
- bio_list_add(bl, bio);
- spin_unlock_irqrestore(&ms->lock, flags);
-
- if (should_wake)
- wake(ms);
-}
-
/*
* Mirror mapping function
*/
@@ -1629,16 +1046,16 @@ static int mirror_map(struct dm_target *ti, struct bio *bio,
struct mirror *m;
struct mirror_set *ms = ti->private;
struct dm_raid1_read_record *read_record = NULL;
+ struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
- map_context->ll = bio_to_region(&ms->rh, bio);
+ map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
queue_bio(ms, bio, rw);
return DM_MAPIO_SUBMITTED;
}
- r = ms->rh.log->type->in_sync(ms->rh.log,
- bio_to_region(&ms->rh, bio), 0);
+ r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
if (r < 0 && r != -EWOULDBLOCK)
return r;
@@ -1686,7 +1103,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
- rh_dec(&ms->rh, map_context->ll);
+ dm_rh_dec(ms->rh, map_context->ll);
return error;
}
@@ -1742,7 +1159,7 @@ out:
static void mirror_presuspend(struct dm_target *ti)
{
struct mirror_set *ms = (struct mirror_set *) ti->private;
- struct dm_dirty_log *log = ms->rh.log;
+ struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
atomic_set(&ms->suspend, 1);
@@ -1750,10 +1167,10 @@ static void mirror_presuspend(struct dm_target *ti)
* We must finish up all the work that we've
* generated (i.e. recovery work).
*/
- rh_stop_recovery(&ms->rh);
+ dm_rh_stop_recovery(ms->rh);
wait_event(_kmirrord_recovery_stopped,
- !atomic_read(&ms->rh.recovery_in_flight));
+ !dm_rh_recovery_in_flight(ms->rh));
if (log->type->presuspend && log->type->presuspend(log))
/* FIXME: need better error handling */
@@ -1771,7 +1188,7 @@ static void mirror_presuspend(struct dm_target *ti)
static void mirror_postsuspend(struct dm_target *ti)
{
struct mirror_set *ms = ti->private;
- struct dm_dirty_log *log = ms->rh.log;
+ struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
if (log->type->postsuspend && log->type->postsuspend(log))
/* FIXME: need better error handling */
@@ -1781,13 +1198,13 @@ static void mirror_postsuspend(struct dm_target *ti)
static void mirror_resume(struct dm_target *ti)
{
struct mirror_set *ms = ti->private;
- struct dm_dirty_log *log = ms->rh.log;
+ struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
atomic_set(&ms->suspend, 0);
if (log->type->resume && log->type->resume(log))
/* FIXME: need better error handling */
DMWARN("log resume failed");
- rh_start_recovery(&ms->rh);
+ dm_rh_start_recovery(ms->rh);
}
/*
@@ -1819,7 +1236,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
{
unsigned int m, sz = 0;
struct mirror_set *ms = (struct mirror_set *) ti->private;
- struct dm_dirty_log *log = ms->rh.log;
+ struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
char buffer[ms->nr_mirrors + 1];
switch (type) {
@@ -1832,15 +1249,15 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
buffer[m] = '\0';
DMEMIT("%llu/%llu 1 %s ",
- (unsigned long long)log->type->get_sync_count(ms->rh.log),
+ (unsigned long long)log->type->get_sync_count(log),
(unsigned long long)ms->nr_regions, buffer);
- sz += log->type->status(ms->rh.log, type, result+sz, maxlen-sz);
+ sz += log->type->status(log, type, result+sz, maxlen-sz);
break;
case STATUSTYPE_TABLE:
- sz = log->type->status(ms->rh.log, type, result, maxlen);
+ sz = log->type->status(log, type, result, maxlen);
DMEMIT("%d", ms->nr_mirrors);
for (m = 0; m < ms->nr_mirrors; m++)
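The dm-raid1.c changes above replace the target's private region hash with the callback-driven API provided by the new dm-region-hash.c file below. As a rough, hypothetical sketch of what that contract asks of a client (my_client and the my_* callbacks are illustrative names; only the dm_region_hash_* calls come from this patch):

/* Hedged sketch, not part of the patch: a hypothetical region-hash client. */
struct my_client {
        struct dm_region_hash *rh;
};

static void my_dispatch_bios(void *context, struct bio_list *bios);
static void my_wakeup_workers(void *context);
static void my_wakeup_recovery_waiters(void *context);

static int my_client_create(struct my_client *c, struct dm_dirty_log *log,
                            sector_t target_begin, uint32_t region_size,
                            region_t nr_regions)
{
        c->rh = dm_region_hash_create(c, my_dispatch_bios,
                                      my_wakeup_workers,
                                      my_wakeup_recovery_waiters,
                                      target_begin, 1 /* max_recovery */,
                                      log, region_size, nr_regions);
        return IS_ERR(c->rh) ? PTR_ERR(c->rh) : 0;
}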
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
new file mode 100644
index 000000000000..59f8d9df9e1a
--- /dev/null
+++ b/drivers/md/dm-region-hash.c
@@ -0,0 +1,704 @@
+/*
+ * Copyright (C) 2003 Sistina Software Limited.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/dm-dirty-log.h>
+#include <linux/dm-region-hash.h>
+
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include "dm.h"
+#include "dm-bio-list.h"
+
+#define DM_MSG_PREFIX "region hash"
+
+/*-----------------------------------------------------------------
+ * Region hash
+ *
+ * The mirror splits itself up into discrete regions. Each
+ * region can be in one of three states: clean, dirty,
+ * nosync. There is no need to put clean regions in the hash.
+ *
+ * In addition to being present in the hash table a region _may_
+ * be present on one of three lists.
+ *
+ * clean_regions: Regions on this list have no io pending to
+ * them, they are in sync, we are no longer interested in them,
+ * they are dull. dm_rh_update_states() will remove them from the
+ * hash table.
+ *
+ * quiesced_regions: These regions have been spun down, ready
+ * for recovery. dm_rh_recovery_start() will remove regions from
+ * this list and hand them to kmirrord, which will schedule the
+ * recovery io with kcopyd.
+ *
+ * recovered_regions: Regions that kcopyd has successfully
+ * recovered. dm_rh_update_states() will now schedule any delayed
+ * io, up the recovery_count, and remove the region from the
+ * hash.
+ *
+ * There are 2 locks:
+ * A rw spin lock 'hash_lock' protects just the hash table,
+ * this is never held in write mode from interrupt context,
+ * which I believe means that we only have to disable irqs when
+ * doing a write lock.
+ *
+ * An ordinary spin lock 'region_lock' that protects the three
+ * lists in the region_hash, with the 'state', 'list' and
+ * 'delayed_bios' fields of the regions. This is used from irq
+ * context, so all other uses will have to suspend local irqs.
+ *---------------------------------------------------------------*/
+struct dm_region_hash {
+ uint32_t region_size;
+ unsigned region_shift;
+
+ /* holds persistent region state */
+ struct dm_dirty_log *log;
+
+ /* hash table */
+ rwlock_t hash_lock;
+ mempool_t *region_pool;
+ unsigned mask;
+ unsigned nr_buckets;
+ unsigned prime;
+ unsigned shift;
+ struct list_head *buckets;
+
+ unsigned max_recovery; /* Max # of regions to recover in parallel */
+
+ spinlock_t region_lock;
+ atomic_t recovery_in_flight;
+ struct semaphore recovery_count;
+ struct list_head clean_regions;
+ struct list_head quiesced_regions;
+ struct list_head recovered_regions;
+ struct list_head failed_recovered_regions;
+
+ void *context;
+ sector_t target_begin;
+
+ /* Callback function to schedule writes of bios */
+ void (*dispatch_bios)(void *context, struct bio_list *bios);
+
+ /* Callback function to wake up the caller's worker thread. */
+ void (*wakeup_workers)(void *context);
+
+ /* Callback function to wake up the caller's recovery waiters. */
+ void (*wakeup_all_recovery_waiters)(void *context);
+};
+
+struct dm_region {
+ struct dm_region_hash *rh; /* FIXME: can we get rid of this ? */
+ region_t key;
+ int state;
+
+ struct list_head hash_list;
+ struct list_head list;
+
+ atomic_t pending;
+ struct bio_list delayed_bios;
+};
+
+/*
+ * Conversion fns
+ */
+static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
+{
+ return sector >> rh->region_shift;
+}
+
+sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
+{
+ return region << rh->region_shift;
+}
+EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
+
+region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
+{
+ return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
+}
+EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
+
+void *dm_rh_region_context(struct dm_region *reg)
+{
+ return reg->rh->context;
+}
+EXPORT_SYMBOL_GPL(dm_rh_region_context);
+
+region_t dm_rh_get_region_key(struct dm_region *reg)
+{
+ return reg->key;
+}
+EXPORT_SYMBOL_GPL(dm_rh_get_region_key);
+
+sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
+{
+ return rh->region_size;
+}
+EXPORT_SYMBOL_GPL(dm_rh_get_region_size);
+
+/*
+ * FIXME: shall we pass in a structure instead of all these args to
+ * dm_region_hash_create()????
+ */
+#define RH_HASH_MULT 2654435387U
+#define RH_HASH_SHIFT 12
+
+#define MIN_REGIONS 64
+struct dm_region_hash *dm_region_hash_create(
+ void *context, void (*dispatch_bios)(void *context,
+ struct bio_list *bios),
+ void (*wakeup_workers)(void *context),
+ void (*wakeup_all_recovery_waiters)(void *context),
+ sector_t target_begin, unsigned max_recovery,
+ struct dm_dirty_log *log, uint32_t region_size,
+ region_t nr_regions)
+{
+ struct dm_region_hash *rh;
+ unsigned nr_buckets, max_buckets;
+ size_t i;
+
+ /*
+ * Calculate a suitable number of buckets for our hash
+ * table.
+ */
+ max_buckets = nr_regions >> 6;
+ for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
+ ;
+ nr_buckets >>= 1;
+
+ rh = kmalloc(sizeof(*rh), GFP_KERNEL);
+ if (!rh) {
+ DMERR("unable to allocate region hash memory");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ rh->context = context;
+ rh->dispatch_bios = dispatch_bios;
+ rh->wakeup_workers = wakeup_workers;
+ rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
+ rh->target_begin = target_begin;
+ rh->max_recovery = max_recovery;
+ rh->log = log;
+ rh->region_size = region_size;
+ rh->region_shift = ffs(region_size) - 1;
+ rwlock_init(&rh->hash_lock);
+ rh->mask = nr_buckets - 1;
+ rh->nr_buckets = nr_buckets;
+
+ rh->shift = RH_HASH_SHIFT;
+ rh->prime = RH_HASH_MULT;
+
+ rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
+ if (!rh->buckets) {
+ DMERR("unable to allocate region hash bucket memory");
+ kfree(rh);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < nr_buckets; i++)
+ INIT_LIST_HEAD(rh->buckets + i);
+
+ spin_lock_init(&rh->region_lock);
+ sema_init(&rh->recovery_count, 0);
+ atomic_set(&rh->recovery_in_flight, 0);
+ INIT_LIST_HEAD(&rh->clean_regions);
+ INIT_LIST_HEAD(&rh->quiesced_regions);
+ INIT_LIST_HEAD(&rh->recovered_regions);
+ INIT_LIST_HEAD(&rh->failed_recovered_regions);
+
+ rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
+ sizeof(struct dm_region));
+ if (!rh->region_pool) {
+ vfree(rh->buckets);
+ kfree(rh);
+ rh = ERR_PTR(-ENOMEM);
+ }
+
+ return rh;
+}
+EXPORT_SYMBOL_GPL(dm_region_hash_create);
+
+void dm_region_hash_destroy(struct dm_region_hash *rh)
+{
+ unsigned h;
+ struct dm_region *reg, *nreg;
+
+ BUG_ON(!list_empty(&rh->quiesced_regions));
+ for (h = 0; h < rh->nr_buckets; h++) {
+ list_for_each_entry_safe(reg, nreg, rh->buckets + h,
+ hash_list) {
+ BUG_ON(atomic_read(&reg->pending));
+ mempool_free(reg, rh->region_pool);
+ }
+ }
+
+ if (rh->log)
+ dm_dirty_log_destroy(rh->log);
+
+ if (rh->region_pool)
+ mempool_destroy(rh->region_pool);
+
+ vfree(rh->buckets);
+ kfree(rh);
+}
+EXPORT_SYMBOL_GPL(dm_region_hash_destroy);
+
+struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
+{
+ return rh->log;
+}
+EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
+
+static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
+{
+ return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
+}
+
+static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
+{
+ struct dm_region *reg;
+ struct list_head *bucket = rh->buckets + rh_hash(rh, region);
+
+ list_for_each_entry(reg, bucket, hash_list)
+ if (reg->key == region)
+ return reg;
+
+ return NULL;
+}
+
+static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
+{
+ list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
+}
+
+static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
+{
+ struct dm_region *reg, *nreg;
+
+ nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
+ if (unlikely(!nreg))
+ nreg = kmalloc(sizeof(*nreg), GFP_NOIO);
+
+ nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
+ DM_RH_CLEAN : DM_RH_NOSYNC;
+ nreg->rh = rh;
+ nreg->key = region;
+ INIT_LIST_HEAD(&nreg->list);
+ atomic_set(&nreg->pending, 0);
+ bio_list_init(&nreg->delayed_bios);
+
+ write_lock_irq(&rh->hash_lock);
+ reg = __rh_lookup(rh, region);
+ if (reg)
+ /* We lost the race. */
+ mempool_free(nreg, rh->region_pool);
+ else {
+ __rh_insert(rh, nreg);
+ if (nreg->state == DM_RH_CLEAN) {
+ spin_lock(&rh->region_lock);
+ list_add(&nreg->list, &rh->clean_regions);
+ spin_unlock(&rh->region_lock);
+ }
+
+ reg = nreg;
+ }
+ write_unlock_irq(&rh->hash_lock);
+
+ return reg;
+}
+
+static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
+{
+ struct dm_region *reg;
+
+ reg = __rh_lookup(rh, region);
+ if (!reg) {
+ read_unlock(&rh->hash_lock);
+ reg = __rh_alloc(rh, region);
+ read_lock(&rh->hash_lock);
+ }
+
+ return reg;
+}
+
+int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
+{
+ int r;
+ struct dm_region *reg;
+
+ read_lock(&rh->hash_lock);
+ reg = __rh_lookup(rh, region);
+ read_unlock(&rh->hash_lock);
+
+ if (reg)
+ return reg->state;
+
+ /*
+ * The region wasn't in the hash, so we fall back to the
+ * dirty log.
+ */
+ r = rh->log->type->in_sync(rh->log, region, may_block);
+
+ /*
+ * Any error from the dirty log (e.g. -EWOULDBLOCK) is
+ * treated as DM_RH_NOSYNC.
+ */
+ return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
+}
+EXPORT_SYMBOL_GPL(dm_rh_get_state);
+
+static void complete_resync_work(struct dm_region *reg, int success)
+{
+ struct dm_region_hash *rh = reg->rh;
+
+ rh->log->type->set_region_sync(rh->log, reg->key, success);
+
+ /*
+ * Dispatch the bios before we call 'wake_up_all'.
+ * This is important because if we are suspending,
+ * we want to know that recovery is complete and
+ * the work queue is flushed. If we wake_up_all
+ * before we dispatch_bios (queue bios and call wake()),
+ * then we risk suspending before the work queue
+ * has been properly flushed.
+ */
+ rh->dispatch_bios(rh->context, &reg->delayed_bios);
+ if (atomic_dec_and_test(&rh->recovery_in_flight))
+ rh->wakeup_all_recovery_waiters(rh->context);
+ up(&rh->recovery_count);
+}
+
+/* dm_rh_mark_nosync
+ * @rh
+ * @bio
+ * @done
+ * @error
+ *
+ * The bio was written on some mirror(s) but failed on other mirror(s).
+ * We can successfully endio the bio but should avoid the region being
+ * marked clean by setting the state DM_RH_NOSYNC.
+ *
+ * This function is _not_ safe in interrupt context!
+ */
+void dm_rh_mark_nosync(struct dm_region_hash *rh,
+ struct bio *bio, unsigned done, int error)
+{
+ unsigned long flags;
+ struct dm_dirty_log *log = rh->log;
+ struct dm_region *reg;
+ region_t region = dm_rh_bio_to_region(rh, bio);
+ int recovering = 0;
+
+ /* We must inform the log that the sync count has changed. */
+ log->type->set_region_sync(log, region, 0);
+
+ read_lock(&rh->hash_lock);
+ reg = __rh_find(rh, region);
+ read_unlock(&rh->hash_lock);
+
+ /* region hash entry should exist because write was in-flight */
+ BUG_ON(!reg);
+ BUG_ON(!list_empty(&reg->list));
+
+ spin_lock_irqsave(&rh->region_lock, flags);
+ /*
+ * Possible cases:
+ * 1) DM_RH_DIRTY
+ * 2) DM_RH_NOSYNC: was dirty, other preceding writes failed
+ * 3) DM_RH_RECOVERING: flushing pending writes
+ * In either case, the region should not have been connected to any list.
+ */
+ recovering = (reg->state == DM_RH_RECOVERING);
+ reg->state = DM_RH_NOSYNC;
+ BUG_ON(!list_empty(&reg->list));
+ spin_unlock_irqrestore(&rh->region_lock, flags);
+
+ bio_endio(bio, error);
+ if (recovering)
+ complete_resync_work(reg, 0);
+}
+EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);
+
+void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
+{
+ struct dm_region *reg, *next;
+
+ LIST_HEAD(clean);
+ LIST_HEAD(recovered);
+ LIST_HEAD(failed_recovered);
+
+ /*
+ * Quickly grab the lists.
+ */
+ write_lock_irq(&rh->hash_lock);
+ spin_lock(&rh->region_lock);
+ if (!list_empty(&rh->clean_regions)) {
+ list_splice_init(&rh->clean_regions, &clean);
+
+ list_for_each_entry(reg, &clean, list)
+ list_del(&reg->hash_list);
+ }
+
+ if (!list_empty(&rh->recovered_regions)) {
+ list_splice_init(&rh->recovered_regions, &recovered);
+
+ list_for_each_entry(reg, &recovered, list)
+ list_del(&reg->hash_list);
+ }
+
+ if (!list_empty(&rh->failed_recovered_regions)) {
+ list_splice_init(&rh->failed_recovered_regions,
+ &failed_recovered);
+
+ list_for_each_entry(reg, &failed_recovered, list)
+ list_del(&reg->hash_list);
+ }
+
+ spin_unlock(&rh->region_lock);
+ write_unlock_irq(&rh->hash_lock);
+
+ /*
+ * All the regions on the recovered and clean lists have
+ * now been pulled out of the system, so no need to do
+ * any more locking.
+ */
+ list_for_each_entry_safe(reg, next, &recovered, list) {
+ rh->log->type->clear_region(rh->log, reg->key);
+ complete_resync_work(reg, 1);
+ mempool_free(reg, rh->region_pool);
+ }
+
+ list_for_each_entry_safe(reg, next, &failed_recovered, list) {
+ complete_resync_work(reg, errors_handled ? 0 : 1);
+ mempool_free(reg, rh->region_pool);
+ }
+
+ list_for_each_entry_safe(reg, next, &clean, list) {
+ rh->log->type->clear_region(rh->log, reg->key);
+ mempool_free(reg, rh->region_pool);
+ }
+
+ rh->log->type->flush(rh->log);
+}
+EXPORT_SYMBOL_GPL(dm_rh_update_states);
+
+static void rh_inc(struct dm_region_hash *rh, region_t region)
+{
+ struct dm_region *reg;
+
+ read_lock(&rh->hash_lock);
+ reg = __rh_find(rh, region);
+
+ spin_lock_irq(&rh->region_lock);
+ atomic_inc(&reg->pending);
+
+ if (reg->state == DM_RH_CLEAN) {
+ reg->state = DM_RH_DIRTY;
+ list_del_init(&reg->list); /* take off the clean list */
+ spin_unlock_irq(&rh->region_lock);
+
+ rh->log->type->mark_region(rh->log, reg->key);
+ } else
+ spin_unlock_irq(&rh->region_lock);
+
+
+ read_unlock(&rh->hash_lock);
+}
+
+void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
+{
+ struct bio *bio;
+
+ for (bio = bios->head; bio; bio = bio->bi_next)
+ rh_inc(rh, dm_rh_bio_to_region(rh, bio));
+}
+EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
+
+void dm_rh_dec(struct dm_region_hash *rh, region_t region)
+{
+ unsigned long flags;
+ struct dm_region *reg;
+ int should_wake = 0;
+
+ read_lock(&rh->hash_lock);
+ reg = __rh_lookup(rh, region);
+ read_unlock(&rh->hash_lock);
+
+ spin_lock_irqsave(&rh->region_lock, flags);
+ if (atomic_dec_and_test(&reg->pending)) {
+ /*
+ * There is no pending I/O for this region.
+ * We can move the region to the corresponding list for the next action.
+ * At this point, the region is not yet connected to any list.
+ *
+ * If the state is DM_RH_NOSYNC, the region should be kept off
+ * the clean list.
+ * The hash entry for DM_RH_NOSYNC will remain in memory
+ * until the region is recovered or the map is reloaded.
+ */
+
+ /* do nothing for DM_RH_NOSYNC */
+ if (reg->state == DM_RH_RECOVERING) {
+ list_add_tail(&reg->list, &rh->quiesced_regions);
+ } else if (reg->state == DM_RH_DIRTY) {
+ reg->state = DM_RH_CLEAN;
+ list_add(&reg->list, &rh->clean_regions);
+ }
+ should_wake = 1;
+ }
+ spin_unlock_irqrestore(&rh->region_lock, flags);
+
+ if (should_wake)
+ rh->wakeup_workers(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_dec);
+
+/*
+ * Starts quiescing a region in preparation for recovery.
+ */
+static int __rh_recovery_prepare(struct dm_region_hash *rh)
+{
+ int r;
+ region_t region;
+ struct dm_region *reg;
+
+ /*
+ * Ask the dirty log what's next.
+ */
+ r = rh->log->type->get_resync_work(rh->log, &region);
+ if (r <= 0)
+ return r;
+
+ /*
+ * Get this region, and start it quiescing by setting the
+ * recovering flag.
+ */
+ read_lock(&rh->hash_lock);
+ reg = __rh_find(rh, region);
+ read_unlock(&rh->hash_lock);
+
+ spin_lock_irq(&rh->region_lock);
+ reg->state = DM_RH_RECOVERING;
+
+ /* Already quiesced ? */
+ if (atomic_read(&reg->pending))
+ list_del_init(&reg->list);
+ else
+ list_move(&reg->list, &rh->quiesced_regions);
+
+ spin_unlock_irq(&rh->region_lock);
+
+ return 1;
+}
+
+void dm_rh_recovery_prepare(struct dm_region_hash *rh)
+{
+ /* Extra reference to avoid race with dm_rh_stop_recovery */
+ atomic_inc(&rh->recovery_in_flight);
+
+ while (!down_trylock(&rh->recovery_count)) {
+ atomic_inc(&rh->recovery_in_flight);
+ if (__rh_recovery_prepare(rh) <= 0) {
+ atomic_dec(&rh->recovery_in_flight);
+ up(&rh->recovery_count);
+ break;
+ }
+ }
+
+ /* Drop the extra reference */
+ if (atomic_dec_and_test(&rh->recovery_in_flight))
+ rh->wakeup_all_recovery_waiters(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);
+
+/*
+ * Returns any quiesced regions.
+ */
+struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
+{
+ struct dm_region *reg = NULL;
+
+ spin_lock_irq(&rh->region_lock);
+ if (!list_empty(&rh->quiesced_regions)) {
+ reg = list_entry(rh->quiesced_regions.next,
+ struct dm_region, list);
+ list_del_init(&reg->list); /* remove from the quiesced list */
+ }
+ spin_unlock_irq(&rh->region_lock);
+
+ return reg;
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_start);
+
+void dm_rh_recovery_end(struct dm_region *reg, int success)
+{
+ struct dm_region_hash *rh = reg->rh;
+
+ spin_lock_irq(&rh->region_lock);
+ if (success)
+ list_add(&reg->list, &reg->rh->recovered_regions);
+ else {
+ reg->state = DM_RH_NOSYNC;
+ list_add(&reg->list, &reg->rh->failed_recovered_regions);
+ }
+ spin_unlock_irq(&rh->region_lock);
+
+ rh->wakeup_workers(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_end);
+
+/* Return recovery in flight count. */
+int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
+{
+ return atomic_read(&rh->recovery_in_flight);
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);
+
+int dm_rh_flush(struct dm_region_hash *rh)
+{
+ return rh->log->type->flush(rh->log);
+}
+EXPORT_SYMBOL_GPL(dm_rh_flush);
+
+void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
+{
+ struct dm_region *reg;
+
+ read_lock(&rh->hash_lock);
+ reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
+ bio_list_add(&reg->delayed_bios, bio);
+ read_unlock(&rh->hash_lock);
+}
+EXPORT_SYMBOL_GPL(dm_rh_delay);
+
+void dm_rh_stop_recovery(struct dm_region_hash *rh)
+{
+ int i;
+
+ /* wait for any recovering regions */
+ for (i = 0; i < rh->max_recovery; i++)
+ down(&rh->recovery_count);
+}
+EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);
+
+void dm_rh_start_recovery(struct dm_region_hash *rh)
+{
+ int i;
+
+ for (i = 0; i < rh->max_recovery; i++)
+ up(&rh->recovery_count);
+
+ rh->wakeup_workers(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
+
+MODULE_DESCRIPTION(DM_NAME " region hash");
+MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
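Taken together, the exported calls above are meant to be driven from a client worker much like dm-raid1's do_mirror()/do_recovery(); a condensed sketch, assuming a hypothetical my_client with a my_recover() helper that schedules the actual copy:

/* Sketch only: how a client worker thread is expected to drive this API. */
static void my_do_work(struct my_client *c)
{
        struct dm_region *reg;

        /* Retire clean/recovered regions and flush the dirty log. */
        dm_rh_update_states(c->rh, 1 /* errors handled */);

        /* Ask the dirty log for resync work and begin quiescing it. */
        dm_rh_recovery_prepare(c->rh);

        /* Hand each fully quiesced region to the copy engine. */
        while ((reg = dm_rh_recovery_start(c->rh))) {
                if (my_recover(c, reg)) /* non-zero: copy was not scheduled */
                        dm_rh_recovery_end(reg, 0);
        }
}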
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 391dfa2ad434..cdfbf65b28cb 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -9,7 +9,8 @@
* Round-robin path selector.
*/
-#include "dm.h"
+#include <linux/device-mapper.h>
+
#include "dm-path-selector.h"
#include <linux/slab.h>
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 1ba8a47d61b1..b2d9d1ac28ad 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,6 +40,11 @@
*/
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
+/*
+ * The size of the mempool used to track chunks in use.
+ */
+#define MIN_IOS 256
+
static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);
@@ -91,7 +96,63 @@ struct dm_snap_pending_exception {
*/
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
-static mempool_t *pending_pool;
+
+struct dm_snap_tracked_chunk {
+ struct hlist_node node;
+ chunk_t chunk;
+};
+
+static struct kmem_cache *tracked_chunk_cache;
+
+static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
+ chunk_t chunk)
+{
+ struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
+ GFP_NOIO);
+ unsigned long flags;
+
+ c->chunk = chunk;
+
+ spin_lock_irqsave(&s->tracked_chunk_lock, flags);
+ hlist_add_head(&c->node,
+ &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
+ spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
+
+ return c;
+}
+
+static void stop_tracking_chunk(struct dm_snapshot *s,
+ struct dm_snap_tracked_chunk *c)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->tracked_chunk_lock, flags);
+ hlist_del(&c->node);
+ spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
+
+ mempool_free(c, s->tracked_chunk_pool);
+}
+
+static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
+{
+ struct dm_snap_tracked_chunk *c;
+ struct hlist_node *hn;
+ int found = 0;
+
+ spin_lock_irq(&s->tracked_chunk_lock);
+
+ hlist_for_each_entry(c, hn,
+ &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
+ if (c->chunk == chunk) {
+ found = 1;
+ break;
+ }
+ }
+
+ spin_unlock_irq(&s->tracked_chunk_lock);
+
+ return found;
+}
/*
* One of these per registered origin, held in the snapshot_origins hash
@@ -302,14 +363,19 @@ static void free_exception(struct dm_snap_exception *e)
kmem_cache_free(exception_cache, e);
}
-static struct dm_snap_pending_exception *alloc_pending_exception(void)
+static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
- return mempool_alloc(pending_pool, GFP_NOIO);
+ struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
+ GFP_NOIO);
+
+ pe->snap = s;
+
+ return pe;
}
static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
- mempool_free(pe, pending_pool);
+ mempool_free(pe, pe->snap->pending_pool);
}
static void insert_completed_exception(struct dm_snapshot *s,
@@ -482,6 +548,7 @@ static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dm_snapshot *s;
+ int i;
int r = -EINVAL;
char persistent;
char *origin_path;
@@ -533,7 +600,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->valid = 1;
s->active = 0;
- s->last_percent = 0;
init_rwsem(&s->lock);
spin_lock_init(&s->pe_lock);
s->ti = ti;
@@ -564,11 +630,30 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad5;
}
+ s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
+ if (!s->pending_pool) {
+ ti->error = "Could not allocate mempool for pending exceptions";
+ goto bad6;
+ }
+
+ s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
+ tracked_chunk_cache);
+ if (!s->tracked_chunk_pool) {
+ ti->error = "Could not allocate tracked_chunk mempool for "
+ "tracking reads";
+ goto bad_tracked_chunk_pool;
+ }
+
+ for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
+
+ spin_lock_init(&s->tracked_chunk_lock);
+
/* Metadata must only be loaded into one table at once */
r = s->store.read_metadata(&s->store);
if (r < 0) {
ti->error = "Failed to read snapshot metadata";
- goto bad6;
+ goto bad_load_and_register;
} else if (r > 0) {
s->valid = 0;
DMWARN("Snapshot is marked invalid.");
@@ -582,7 +667,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (register_snapshot(s)) {
r = -EINVAL;
ti->error = "Cannot register snapshot origin";
- goto bad6;
+ goto bad_load_and_register;
}
ti->private = s;
@@ -590,6 +675,12 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return 0;
+ bad_load_and_register:
+ mempool_destroy(s->tracked_chunk_pool);
+
+ bad_tracked_chunk_pool:
+ mempool_destroy(s->pending_pool);
+
bad6:
dm_kcopyd_client_destroy(s->kcopyd_client);
@@ -624,6 +715,9 @@ static void __free_exceptions(struct dm_snapshot *s)
static void snapshot_dtr(struct dm_target *ti)
{
+#ifdef CONFIG_DM_DEBUG
+ int i;
+#endif
struct dm_snapshot *s = ti->private;
flush_workqueue(ksnapd);
@@ -632,8 +726,17 @@ static void snapshot_dtr(struct dm_target *ti)
/* After this returns there can be no new kcopyd jobs. */
unregister_snapshot(s);
+#ifdef CONFIG_DM_DEBUG
+ for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
+ BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
+#endif
+
+ mempool_destroy(s->tracked_chunk_pool);
+
__free_exceptions(s);
+ mempool_destroy(s->pending_pool);
+
dm_put_device(ti, s->origin);
dm_put_device(ti, s->cow);
@@ -720,8 +823,10 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
* the bios for the original write to the origin.
*/
if (primary_pe &&
- atomic_dec_and_test(&primary_pe->ref_count))
+ atomic_dec_and_test(&primary_pe->ref_count)) {
origin_bios = bio_list_get(&primary_pe->origin_bios);
+ free_pending_exception(primary_pe);
+ }
/*
* Free the pe if it's not linked to an origin write or if
@@ -730,12 +835,6 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
if (!primary_pe || primary_pe != pe)
free_pending_exception(pe);
- /*
- * Free the primary pe if nothing references it.
- */
- if (primary_pe && !atomic_read(&primary_pe->ref_count))
- free_pending_exception(primary_pe);
-
return origin_bios;
}
@@ -772,6 +871,13 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
}
/*
+ * Check for conflicting reads. This is extremely improbable,
+ * so yield() is sufficient and there is no need for a wait queue.
+ */
+ while (__chunk_is_tracked(s, pe->e.old_chunk))
+ yield();
+
+ /*
* Add a proper exception, and remove the
* in-flight exception from the list.
*/
@@ -873,7 +979,7 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
* to hold the lock while we do this.
*/
up_write(&s->lock);
- pe = alloc_pending_exception();
+ pe = alloc_pending_exception(s);
down_write(&s->lock);
if (!s->valid) {
@@ -893,7 +999,6 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
bio_list_init(&pe->snapshot_bios);
pe->primary_pe = NULL;
atomic_set(&pe->ref_count, 0);
- pe->snap = s;
pe->started = 0;
if (s->store.prepare_exception(&s->store, &pe->e)) {
@@ -974,14 +1079,10 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
start_copy(pe);
goto out;
}
- } else
- /*
- * FIXME: this read path scares me because we
- * always use the origin when we have a pending
- * exception. However I can't think of a
- * situation where this is wrong - ejt.
- */
+ } else {
bio->bi_bdev = s->origin->bdev;
+ map_context->ptr = track_chunk(s, chunk);
+ }
out_unlock:
up_write(&s->lock);
@@ -989,6 +1090,18 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
return r;
}
+static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
+ int error, union map_info *map_context)
+{
+ struct dm_snapshot *s = ti->private;
+ struct dm_snap_tracked_chunk *c = map_context->ptr;
+
+ if (c)
+ stop_tracking_chunk(s, c);
+
+ return 0;
+}
+
static void snapshot_resume(struct dm_target *ti)
{
struct dm_snapshot *s = ti->private;
@@ -1266,6 +1379,7 @@ static struct target_type snapshot_target = {
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
.map = snapshot_map,
+ .end_io = snapshot_end_io,
.resume = snapshot_resume,
.status = snapshot_status,
};
@@ -1306,9 +1420,9 @@ static int __init dm_snapshot_init(void)
goto bad4;
}
- pending_pool = mempool_create_slab_pool(128, pending_cache);
- if (!pending_pool) {
- DMERR("Couldn't create pending pool.");
+ tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
+ if (!tracked_chunk_cache) {
+ DMERR("Couldn't create cache to track chunks in use.");
r = -ENOMEM;
goto bad5;
}
@@ -1317,13 +1431,13 @@ static int __init dm_snapshot_init(void)
if (!ksnapd) {
DMERR("Failed to create ksnapd workqueue.");
r = -ENOMEM;
- goto bad6;
+ goto bad_pending_pool;
}
return 0;
- bad6:
- mempool_destroy(pending_pool);
+ bad_pending_pool:
+ kmem_cache_destroy(tracked_chunk_cache);
bad5:
kmem_cache_destroy(pending_cache);
bad4:
@@ -1352,9 +1466,9 @@ static void __exit dm_snapshot_exit(void)
DMERR("origin unregister failed %d", r);
exit_origin_hash();
- mempool_destroy(pending_pool);
kmem_cache_destroy(pending_cache);
kmem_cache_destroy(exception_cache);
+ kmem_cache_destroy(tracked_chunk_cache);
}
/* Module hooks */
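The read tracking added to dm-snap.c is spread across several hunks above; condensed, and with the exception-store details elided, the pairing of the map and end_io paths amounts to roughly the following (the sketch_* wrappers are illustrative, while track_chunk() and stop_tracking_chunk() are the helpers introduced by this patch):

/* Condensed sketch of the read-tracking hand-off; not literal patch code. */
static int sketch_map_read(struct dm_snapshot *s, struct bio *bio,
                           union map_info *map_context, chunk_t chunk)
{
        bio->bi_bdev = s->origin->bdev;
        map_context->ptr = track_chunk(s, chunk); /* remember in-flight read */
        return DM_MAPIO_REMAPPED;
}

static int sketch_end_io(struct dm_snapshot *s, union map_info *map_context)
{
        struct dm_snap_tracked_chunk *c = map_context->ptr;

        if (c)
                stop_tracking_chunk(s, c); /* read done; pending_complete() may proceed */
        return 0;
}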
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index 24f9fb73b982..f07315fe2362 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -9,7 +9,7 @@
#ifndef DM_SNAPSHOT_H
#define DM_SNAPSHOT_H
-#include "dm.h"
+#include <linux/device-mapper.h>
#include "dm-bio-list.h"
#include <linux/blkdev.h>
#include <linux/workqueue.h>
@@ -130,6 +130,10 @@ struct exception_store {
void *context;
};
+#define DM_TRACKED_CHUNK_HASH_SIZE 16
+#define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \
+ (DM_TRACKED_CHUNK_HASH_SIZE - 1))
+
struct dm_snapshot {
struct rw_semaphore lock;
struct dm_target *ti;
@@ -154,8 +158,7 @@ struct dm_snapshot {
/* Used for display of table */
char type;
- /* The last percentage we notified */
- int last_percent;
+ mempool_t *pending_pool;
struct exception_table pending;
struct exception_table complete;
@@ -174,6 +177,11 @@ struct dm_snapshot {
/* Queue of snapshot writes for ksnapd to flush */
struct bio_list queued_bios;
struct work_struct queued_bios_work;
+
+ /* Chunks with outstanding reads */
+ mempool_t *tracked_chunk_pool;
+ spinlock_t tracked_chunk_lock;
+ struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
};
/*
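For reference, DM_TRACKED_CHUNK_HASH() simply masks the chunk number into one of the DM_TRACKED_CHUNK_HASH_SIZE buckets; a couple of worked values (illustrative only):

/*
 * Worked examples, illustrative only, with DM_TRACKED_CHUNK_HASH_SIZE == 16:
 *   DM_TRACKED_CHUNK_HASH(0)      -> bucket 0
 *   DM_TRACKED_CHUNK_HASH(17)     -> bucket 1   (17 & 15)
 *   DM_TRACKED_CHUNK_HASH(0x1234) -> bucket 4   (0x1234 & 15)
 */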
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4de90ab3968b..a2d068dbe9e2 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -4,7 +4,7 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -60,8 +60,8 @@ static inline struct stripe_c *alloc_context(unsigned int stripes)
{
size_t len;
- if (array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
- stripes))
+ if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
+ stripes))
return NULL;
len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);
@@ -284,8 +284,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
memset(major_minor, 0, sizeof(major_minor));
sprintf(major_minor, "%d:%d",
- bio->bi_bdev->bd_disk->major,
- bio->bi_bdev->bd_disk->first_minor);
+ MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
+ MINOR(disk_devt(bio->bi_bdev->bd_disk)));
/*
* Test to see which stripe drive triggered the event
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 94116eaf4709..a63161aec487 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -43,7 +43,7 @@ struct dm_table {
* device. This should be a combination of FMODE_READ
* and FMODE_WRITE.
*/
- int mode;
+ fmode_t mode;
/* a list of devices used by this table */
struct list_head devices;
@@ -217,7 +217,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
return 0;
}
-int dm_table_create(struct dm_table **result, int mode,
+int dm_table_create(struct dm_table **result, fmode_t mode,
unsigned num_targets, struct mapped_device *md)
{
struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
@@ -250,7 +250,8 @@ static void free_devices(struct list_head *devices)
struct list_head *tmp, *next;
list_for_each_safe(tmp, next, devices) {
- struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+ struct dm_dev_internal *dd =
+ list_entry(tmp, struct dm_dev_internal, list);
kfree(dd);
}
}
@@ -312,44 +313,14 @@ static inline int check_space(struct dm_table *t)
}
/*
- * Convert a device path to a dev_t.
- */
-static int lookup_device(const char *path, dev_t *dev)
-{
- int r;
- struct nameidata nd;
- struct inode *inode;
-
- if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
- return r;
-
- inode = nd.path.dentry->d_inode;
- if (!inode) {
- r = -ENOENT;
- goto out;
- }
-
- if (!S_ISBLK(inode->i_mode)) {
- r = -ENOTBLK;
- goto out;
- }
-
- *dev = inode->i_rdev;
-
- out:
- path_put(&nd.path);
- return r;
-}
-
-/*
* See if we've already got a device in the list.
*/
-static struct dm_dev *find_device(struct list_head *l, dev_t dev)
+static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
list_for_each_entry (dd, l, list)
- if (dd->bdev->bd_dev == dev)
+ if (dd->dm_dev.bdev->bd_dev == dev)
return dd;
return NULL;
@@ -358,45 +329,47 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
/*
* Open a device so we can use it as a map destination.
*/
-static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
+static int open_dev(struct dm_dev_internal *d, dev_t dev,
+ struct mapped_device *md)
{
static char *_claim_ptr = "I belong to device-mapper";
struct block_device *bdev;
int r;
- BUG_ON(d->bdev);
+ BUG_ON(d->dm_dev.bdev);
- bdev = open_by_devnum(dev, d->mode);
+ bdev = open_by_devnum(dev, d->dm_dev.mode);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
if (r)
- blkdev_put(bdev);
+ blkdev_put(bdev, d->dm_dev.mode);
else
- d->bdev = bdev;
+ d->dm_dev.bdev = bdev;
return r;
}
/*
* Close a device that we've been using.
*/
-static void close_dev(struct dm_dev *d, struct mapped_device *md)
+static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
- if (!d->bdev)
+ if (!d->dm_dev.bdev)
return;
- bd_release_from_disk(d->bdev, dm_disk(md));
- blkdev_put(d->bdev);
- d->bdev = NULL;
+ bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
+ blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
+ d->dm_dev.bdev = NULL;
}
/*
* If possible, this checks an area of a destination device is valid.
*/
-static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
+static int check_device_area(struct dm_dev_internal *dd, sector_t start,
+ sector_t len)
{
- sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+ sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
if (!dev_size)
return 1;
@@ -409,16 +382,17 @@ static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
* careful to leave things as they were if we fail to reopen the
* device.
*/
-static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
+static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
+ struct mapped_device *md)
{
int r;
- struct dm_dev dd_copy;
- dev_t dev = dd->bdev->bd_dev;
+ struct dm_dev_internal dd_copy;
+ dev_t dev = dd->dm_dev.bdev->bd_dev;
dd_copy = *dd;
- dd->mode |= new_mode;
- dd->bdev = NULL;
+ dd->dm_dev.mode |= new_mode;
+ dd->dm_dev.bdev = NULL;
r = open_dev(dd, dev, md);
if (!r)
close_dev(&dd_copy, md);
@@ -434,11 +408,11 @@ static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *m
*/
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
const char *path, sector_t start, sector_t len,
- int mode, struct dm_dev **result)
+ fmode_t mode, struct dm_dev **result)
{
int r;
dev_t uninitialized_var(dev);
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
unsigned int major, minor;
BUG_ON(!t);
@@ -450,8 +424,12 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
return -EOVERFLOW;
} else {
/* convert the path to a device */
- if ((r = lookup_device(path, &dev)))
- return r;
+ struct block_device *bdev = lookup_bdev(path);
+
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
+ dev = bdev->bd_dev;
+ bdput(bdev);
}
dd = find_device(&t->devices, dev);
@@ -460,20 +438,20 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
if (!dd)
return -ENOMEM;
- dd->mode = mode;
- dd->bdev = NULL;
+ dd->dm_dev.mode = mode;
+ dd->dm_dev.bdev = NULL;
if ((r = open_dev(dd, dev, t->md))) {
kfree(dd);
return r;
}
- format_dev_t(dd->name, dev);
+ format_dev_t(dd->dm_dev.name, dev);
atomic_set(&dd->count, 0);
list_add(&dd->list, &t->devices);
- } else if (dd->mode != (mode | dd->mode)) {
+ } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
r = upgrade_mode(dd, mode, t->md);
if (r)
return r;
@@ -482,11 +460,11 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
if (!check_device_area(dd, start, len)) {
DMWARN("device %s too small for target", path);
- dm_put_device(ti, dd);
+ dm_put_device(ti, &dd->dm_dev);
return -EINVAL;
}
- *result = dd;
+ *result = &dd->dm_dev;
return 0;
}
@@ -495,6 +473,13 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
struct io_restrictions *rs = &ti->limits;
+ char b[BDEVNAME_SIZE];
+
+ if (unlikely(!q)) {
+ DMWARN("%s: Cannot set limits for nonexistent device %s",
+ dm_device_name(ti->table->md), bdevname(bdev, b));
+ return;
+ }
/*
* Combine the device limits low.
@@ -506,14 +491,13 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
rs->max_sectors =
min_not_zero(rs->max_sectors, q->max_sectors);
- /* FIXME: Device-Mapper on top of RAID-0 breaks because DM
- * currently doesn't honor MD's merge_bvec_fn routine.
- * In this case, we'll force DM to use PAGE_SIZE or
- * smaller I/O, just to be safe. A better fix is in the
- * works, but add this for the time being so it will at
- * least operate correctly.
+ /*
+ * Check if a merge fn is supported.
+ * If not, we'll force DM to use PAGE_SIZE or
+ * smaller I/O, just to be safe.
*/
- if (q->merge_bvec_fn)
+
+ if (q->merge_bvec_fn && !ti->type->merge)
rs->max_sectors =
min_not_zero(rs->max_sectors,
(unsigned int) (PAGE_SIZE >> 9));
@@ -544,7 +528,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
EXPORT_SYMBOL_GPL(dm_set_device_limits);
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
- sector_t len, int mode, struct dm_dev **result)
+ sector_t len, fmode_t mode, struct dm_dev **result)
{
int r = __table_get_device(ti->table, ti, path,
start, len, mode, result);
@@ -558,8 +542,11 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
/*
* Decrement a devices use count and remove it if necessary.
*/
-void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
+void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
+ struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
+ dm_dev);
+
if (atomic_dec_and_test(&dd->count)) {
close_dev(dd, ti->table->md);
list_del(&dd->list);
@@ -891,7 +878,7 @@ struct list_head *dm_table_get_devices(struct dm_table *t)
return &t->devices;
}
-int dm_table_get_mode(struct dm_table *t)
+fmode_t dm_table_get_mode(struct dm_table *t)
{
return t->mode;
}
@@ -955,13 +942,20 @@ int dm_table_resume_targets(struct dm_table *t)
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
int r = 0;
list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->bdev);
- r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+ struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
+ char b[BDEVNAME_SIZE];
+
+ if (likely(q))
+ r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+ else
+ DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
+ dm_device_name(t->md),
+ bdevname(dd->dm_dev.bdev, b));
}
return r;
@@ -969,13 +963,19 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
void dm_table_unplug_all(struct dm_table *t)
{
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->bdev);
-
- blk_unplug(q);
+ struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
+ char b[BDEVNAME_SIZE];
+
+ if (likely(q))
+ blk_unplug(q);
+ else
+ DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
+ dm_device_name(t->md),
+ bdevname(dd->dm_dev.bdev, b));
}
}
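The dm-table.c changes above hide the device list linkage and reference count inside struct dm_dev_internal while callers keep handling a plain struct dm_dev *; dm_put_device() recovers the wrapper with container_of(). A sketch of that idiom, with the dm_dev_internal layout inferred from its uses in this patch (the real definition lives in drivers/md/dm.h, which is not shown here):

/* Sketch: layout inferred from this patch; see drivers/md/dm.h for the real one. */
struct dm_dev_internal {
        struct list_head list;     /* linked into the table's device list */
        atomic_t count;            /* users of this table device */
        struct dm_dev dm_dev;      /* what dm_get_device() hands back */
};

static inline struct dm_dev_internal *to_dm_dev_internal(struct dm_dev *d)
{
        return container_of(d, struct dm_dev_internal, dm_dev);
}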
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index bdec206c404b..cdbf126ec106 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -4,7 +4,7 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 372369b1cc20..6963ad148408 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -21,7 +21,6 @@
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
-#include <linux/smp_lock.h>
#define DM_MSG_PREFIX "core"
@@ -37,8 +36,8 @@ static DEFINE_SPINLOCK(_minor_lock);
struct dm_io {
struct mapped_device *md;
int error;
- struct bio *bio;
atomic_t io_count;
+ struct bio *bio;
unsigned long start_time;
};
@@ -76,7 +75,6 @@ union map_info *dm_get_mapinfo(struct bio *bio)
*/
struct dm_wq_req {
enum {
- DM_WQ_FLUSH_ALL,
DM_WQ_FLUSH_DEFERRED,
} type;
struct work_struct work;
@@ -151,40 +149,40 @@ static struct kmem_cache *_tio_cache;
static int __init local_init(void)
{
- int r;
+ int r = -ENOMEM;
/* allocate a slab for the dm_ios */
_io_cache = KMEM_CACHE(dm_io, 0);
if (!_io_cache)
- return -ENOMEM;
+ return r;
/* allocate a slab for the target ios */
_tio_cache = KMEM_CACHE(dm_target_io, 0);
- if (!_tio_cache) {
- kmem_cache_destroy(_io_cache);
- return -ENOMEM;
- }
+ if (!_tio_cache)
+ goto out_free_io_cache;
r = dm_uevent_init();
- if (r) {
- kmem_cache_destroy(_tio_cache);
- kmem_cache_destroy(_io_cache);
- return r;
- }
+ if (r)
+ goto out_free_tio_cache;
_major = major;
r = register_blkdev(_major, _name);
- if (r < 0) {
- kmem_cache_destroy(_tio_cache);
- kmem_cache_destroy(_io_cache);
- dm_uevent_exit();
- return r;
- }
+ if (r < 0)
+ goto out_uevent_exit;
if (!_major)
_major = r;
return 0;
+
+out_uevent_exit:
+ dm_uevent_exit();
+out_free_tio_cache:
+ kmem_cache_destroy(_tio_cache);
+out_free_io_cache:
+ kmem_cache_destroy(_io_cache);
+
+ return r;
}
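
The hunk above converts local_init()'s error handling to the usual goto-based unwinding, so each failure path releases only what was already set up, in reverse order. A minimal compilable sketch of the same idiom, with plain allocations standing in for the dm caches (the resource names are made up):

#include <stdlib.h>

/* Illustrative only: acquire three hypothetical resources and, on failure,
 * unwind in reverse order through goto labels, mirroring the shape of
 * local_init() above.
 */
static int setup_example(void)
{
	int r = -1;
	void *a, *b, *c;

	a = malloc(16);
	if (!a)
		return r;

	b = malloc(16);
	if (!b)
		goto out_free_a;

	c = malloc(16);
	if (!c)
		goto out_free_b;

	free(c);
	free(b);
	free(a);
	return 0;

out_free_b:
	free(b);
out_free_a:
	free(a);
	return r;	/* each failure frees only what it already got */
}

int main(void)
{
	return setup_example() ? EXIT_FAILURE : EXIT_SUCCESS;
}
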
static void local_exit(void)
@@ -249,13 +247,13 @@ static void __exit dm_exit(void)
/*
* Block device functions
*/
-static int dm_blk_open(struct inode *inode, struct file *file)
+static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mapped_device *md;
spin_lock(&_minor_lock);
- md = inode->i_bdev->bd_disk->private_data;
+ md = bdev->bd_disk->private_data;
if (!md)
goto out;
@@ -274,11 +272,9 @@ out:
return md ? 0 : -ENXIO;
}
-static int dm_blk_close(struct inode *inode, struct file *file)
+static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
- struct mapped_device *md;
-
- md = inode->i_bdev->bd_disk->private_data;
+ struct mapped_device *md = disk->private_data;
atomic_dec(&md->open_count);
dm_put(md);
return 0;
@@ -315,21 +311,14 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return dm_get_geometry(md, geo);
}
-static int dm_blk_ioctl(struct inode *inode, struct file *file,
+static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct mapped_device *md;
- struct dm_table *map;
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ struct dm_table *map = dm_get_table(md);
struct dm_target *tgt;
int r = -ENOTTY;
- /* We don't really need this lock, but we do need 'inode'. */
- unlock_kernel();
-
- md = inode->i_bdev->bd_disk->private_data;
-
- map = dm_get_table(md);
-
if (!map || !dm_table_get_size(map))
goto out;
@@ -345,12 +334,11 @@ static int dm_blk_ioctl(struct inode *inode, struct file *file,
}
if (tgt->type->ioctl)
- r = tgt->type->ioctl(tgt, inode, file, cmd, arg);
+ r = tgt->type->ioctl(tgt, cmd, arg);
out:
dm_table_put(map);
- lock_kernel();
return r;
}
@@ -377,13 +365,14 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
+ int cpu;
io->start_time = jiffies;
- preempt_disable();
- disk_round_stats(dm_disk(md));
- preempt_enable();
- dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
+ cpu = part_stat_lock();
+ part_round_stats(cpu, &dm_disk(md)->part0);
+ part_stat_unlock();
+ dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}
static int end_io_acct(struct dm_io *io)
@@ -391,15 +380,16 @@ static int end_io_acct(struct dm_io *io)
struct mapped_device *md = io->md;
struct bio *bio = io->bio;
unsigned long duration = jiffies - io->start_time;
- int pending;
+ int pending, cpu;
int rw = bio_data_dir(bio);
- preempt_disable();
- disk_round_stats(dm_disk(md));
- preempt_enable();
- dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);
+ cpu = part_stat_lock();
+ part_round_stats(cpu, &dm_disk(md)->part0);
+ part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
+ part_stat_unlock();
- disk_stat_add(dm_disk(md), ticks[rw], duration);
+ dm_disk(md)->part0.in_flight = pending =
+ atomic_dec_return(&md->pending);
return !pending;
}
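
Both accounting helpers above move from the old global disk_stat_* calls to per-partition statistics taken under part_stat_lock()/part_stat_unlock(). A toy userspace model of the underlying idea, per-CPU counters that are updated locally and summed when read (this mimics the intent, not the kernel API; the counters and values are invented):

#include <stdio.h>

#define NR_CPUS 4

/* Each "CPU" bumps its own slot; a reader sums all slots. */
static unsigned long ios[NR_CPUS];
static unsigned long sectors[NR_CPUS];

static void account_bio(int cpu, unsigned long bio_sectors)
{
	ios[cpu]++;			/* like part_stat_inc(cpu, ..., ios[rw]) */
	sectors[cpu] += bio_sectors;	/* like part_stat_add(cpu, ..., sectors[rw]) */
}

static unsigned long total(const unsigned long *v)
{
	unsigned long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += v[cpu];
	return sum;
}

int main(void)
{
	account_bio(0, 8);
	account_bio(2, 128);
	printf("ios=%lu sectors=%lu\n", total(ios), total(sectors));
	return 0;
}
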
@@ -667,6 +657,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
clone->bi_size = to_bytes(len);
clone->bi_io_vec->bv_offset = offset;
clone->bi_io_vec->bv_len = clone->bi_size;
+ clone->bi_flags |= 1 << BIO_CLONED;
return clone;
}
@@ -829,6 +820,53 @@ static int __split_bio(struct mapped_device *md, struct bio *bio)
* CRUD END
*---------------------------------------------------------------*/
+static int dm_merge_bvec(struct request_queue *q,
+ struct bvec_merge_data *bvm,
+ struct bio_vec *biovec)
+{
+ struct mapped_device *md = q->queuedata;
+ struct dm_table *map = dm_get_table(md);
+ struct dm_target *ti;
+ sector_t max_sectors;
+ int max_size = 0;
+
+ if (unlikely(!map))
+ goto out;
+
+ ti = dm_table_find_target(map, bvm->bi_sector);
+ if (!dm_target_is_valid(ti))
+ goto out_table;
+
+ /*
+ * Find maximum amount of I/O that won't need splitting
+ */
+ max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
+ (sector_t) BIO_MAX_SECTORS);
+ max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
+ if (max_size < 0)
+ max_size = 0;
+
+ /*
+ * merge_bvec_fn() returns number of bytes
+ * it can accept at this offset
+ * max is precomputed maximal io size
+ */
+ if (max_size && ti->type->merge)
+ max_size = ti->type->merge(ti, bvm, biovec, max_size);
+
+out_table:
+ dm_table_put(map);
+
+out:
+ /*
+ * Always allow an entire first page
+ */
+ if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
+ max_size = biovec->bv_len;
+
+ return max_size;
+}
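
dm_merge_bvec() reports how many further bytes a bio ending at bvm->bi_sector may grow by before it would have to be split. A worked example of the clamping arithmetic with invented numbers (only the arithmetic mirrors the function above):

#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
	long long max_sectors = 256;	/* most the target accepts at this sector */
	long long bi_size = 64 * 1024;	/* bytes already queued in the bio */
	long long max_size;

	max_size = (max_sectors << SECTOR_SHIFT) - bi_size;
	if (max_size < 0)
		max_size = 0;

	/* 256 sectors is 128KiB; 64KiB is used, so 64KiB may still be merged */
	printf("can still merge %lld bytes\n", max_size);
	return 0;
}
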
+
/*
* The request function that just remaps the bio built up by
* dm_merge_bvec.
@@ -838,6 +876,7 @@ static int dm_request(struct request_queue *q, struct bio *bio)
int r = -EIO;
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
+ int cpu;
/*
* There is no use in forwarding any barrier request since we can't
@@ -850,8 +889,10 @@ static int dm_request(struct request_queue *q, struct bio *bio)
down_read(&md->io_lock);
- disk_stat_inc(dm_disk(md), ios[rw]);
- disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
+ part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
+ part_stat_unlock();
/*
* If we're suspended we have to queue
@@ -1032,6 +1073,7 @@ static struct mapped_device *alloc_dev(int minor)
blk_queue_make_request(md->queue, dm_request);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
md->queue->unplug_fn = dm_unplug_all;
+ blk_queue_merge_bvec(md->queue, dm_merge_bvec);
md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
if (!md->io_pool)
@@ -1098,7 +1140,7 @@ static void unlock_fs(struct mapped_device *md);
static void free_dev(struct mapped_device *md)
{
- int minor = md->disk->first_minor;
+ int minor = MINOR(disk_devt(md->disk));
if (md->suspended_bdev) {
unlock_fs(md);
@@ -1134,7 +1176,7 @@ static void event_callback(void *context)
list_splice_init(&md->uevent_list, &uevents);
spin_unlock_irqrestore(&md->uevent_lock, flags);
- dm_send_uevents(&uevents, &md->disk->dev.kobj);
+ dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
atomic_inc(&md->event_nr);
wake_up(&md->eventq);
@@ -1219,7 +1261,7 @@ static struct mapped_device *dm_find_md(dev_t dev)
md = idr_find(&_minor_idr, minor);
if (md && (md == MINOR_ALLOCED ||
- (dm_disk(md)->first_minor != minor) ||
+ (MINOR(disk_devt(dm_disk(md))) != minor) ||
test_bit(DMF_FREEING, &md->flags))) {
md = NULL;
goto out;
@@ -1270,7 +1312,8 @@ void dm_put(struct mapped_device *md)
if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
map = dm_get_table(md);
- idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
+ idr_replace(&_minor_idr, MINOR_ALLOCED,
+ MINOR(disk_devt(dm_disk(md))));
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
if (!dm_suspended(md)) {
@@ -1340,9 +1383,6 @@ static void dm_wq_work(struct work_struct *work)
down_write(&md->io_lock);
switch (req->type) {
- case DM_WQ_FLUSH_ALL:
- __merge_pushback_list(md);
- /* pass through */
case DM_WQ_FLUSH_DEFERRED:
__flush_deferred_io(md);
break;
@@ -1472,7 +1512,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
if (!md->suspended_bdev) {
DMWARN("bdget failed in dm_suspend");
r = -ENOMEM;
- goto flush_and_out;
+ goto out;
}
/*
@@ -1523,14 +1563,6 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
set_bit(DMF_SUSPENDED, &md->flags);
-flush_and_out:
- if (r && noflush)
- /*
- * Because there may be already I/Os in the pushback list,
- * flush them before return.
- */
- dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL);
-
out:
if (r && md->suspended_bdev) {
bdput(md->suspended_bdev);
@@ -1590,7 +1622,7 @@ out:
*---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
- kobject_uevent(&md->disk->dev.kobj, KOBJ_CHANGE);
+ kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
}
uint32_t dm_next_uevent_seq(struct mapped_device *md)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 8c03b634e62e..0ade60cdef42 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -25,13 +25,10 @@
/*
* List of devices that a metadevice uses and should open/close.
*/
-struct dm_dev {
+struct dm_dev_internal {
struct list_head list;
-
atomic_t count;
- int mode;
- struct block_device *bdev;
- char name[16];
+ struct dm_dev dm_dev;
};
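
With struct dm_dev now embedded in struct dm_dev_internal, code that is handed only the embedded dm_dev recovers the wrapper via container_of(), as in the dm_put_device() hunk at the top of this diff. A compilable toy of the same pattern (the types below are invented, not the real dm structures):

#include <stddef.h>
#include <stdio.h>

/* "outer" embeds "inner"; container_of() recovers the outer pointer from
 * a pointer to the embedded member.
 */
struct inner { int mode; };
struct outer { int count; struct inner in; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct outer o = { .count = 1, .in = { .mode = 3 } };
	struct inner *ip = &o.in;	/* what a caller would be handed */
	struct outer *op = container_of(ip, struct outer, in);

	printf("count=%d mode=%d\n", op->count, ip->mode);
	return 0;
}
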
struct dm_table;
@@ -49,7 +46,6 @@ void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
-void dm_table_unplug_all(struct dm_table *t);
/*
* To check the return value from dm_table_find_target().
@@ -66,15 +62,6 @@ void dm_put_target_type(struct target_type *t);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
void *param), void *param);
-/*-----------------------------------------------------------------
- * Useful inlines.
- *---------------------------------------------------------------*/
-static inline int array_too_big(unsigned long fixed, unsigned long obj,
- unsigned long num)
-{
- return (num > (ULONG_MAX - fixed) / obj);
-}
-
int dm_split_args(int *argc, char ***argvp, char *input);
/*
@@ -93,19 +80,11 @@ void dm_linear_exit(void);
int dm_stripe_init(void);
void dm_stripe_exit(void);
-void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
-union map_info *dm_get_mapinfo(struct bio *bio);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);
void dm_kobject_uevent(struct mapped_device *md);
-/*
- * Dirty log
- */
-int dm_dirty_log_init(void);
-void dm_dirty_log_exit(void);
-
int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index d107ddceefcd..f26c1f9a475b 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -287,6 +287,8 @@ static int run(mddev_t *mddev)
int i;
conf_t *conf = kmalloc(sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return -ENOMEM;
for (i=0; i<Modes; i++) {
atomic_set(&conf->counters[i], 0);
@@ -297,7 +299,7 @@ static int run(mddev_t *mddev)
rdev_for_each(rdev, tmp, mddev)
conf->rdev = rdev;
- mddev->array_size = mddev->size;
+ mddev->array_sectors = mddev->size * 2;
mddev->private = conf;
reconfig(mddev, mddev->layout, -1);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 6a866d7c8ae5..190147c79e79 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -16,16 +16,8 @@
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/module.h>
-
-#include <linux/raid/md.h>
-#include <linux/slab.h>
#include <linux/raid/linear.h>
-#define MAJOR_NR MD_MAJOR
-#define MD_DRIVER
-#define MD_PERSONALITY
-
/*
* find which device holds a particular offset
*/
@@ -33,16 +25,15 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
{
dev_info_t *hash;
linear_conf_t *conf = mddev_to_conf(mddev);
- sector_t block = sector >> 1;
/*
 * sector_div(a,b) returns the remainder and sets a to a/b
*/
- block >>= conf->preshift;
- (void)sector_div(block, conf->hash_spacing);
- hash = conf->hash_table[block];
+ sector >>= conf->sector_shift;
+ (void)sector_div(sector, conf->spacing);
+ hash = conf->hash_table[sector];
- while ((sector>>1) >= (hash->size + hash->offset))
+ while (sector >= hash->num_sectors + hash->start_sector)
hash++;
return hash;
}
@@ -65,7 +56,7 @@ static int linear_mergeable_bvec(struct request_queue *q,
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
dev0 = which_dev(mddev, sector);
- maxsectors = (dev0->size << 1) - (sector - (dev0->offset<<1));
+ maxsectors = dev0->num_sectors - (sector - dev0->start_sector);
if (maxsectors < bio_sectors)
maxsectors = 0;
@@ -112,8 +103,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
dev_info_t **table;
mdk_rdev_t *rdev;
int i, nb_zone, cnt;
- sector_t min_spacing;
- sector_t curr_offset;
+ sector_t min_sectors;
+ sector_t curr_sector;
struct list_head *tmp;
conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
@@ -122,13 +113,13 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
return NULL;
cnt = 0;
- conf->array_size = 0;
+ conf->array_sectors = 0;
rdev_for_each(rdev, tmp, mddev) {
int j = rdev->raid_disk;
dev_info_t *disk = conf->disks + j;
- if (j < 0 || j > raid_disks || disk->rdev) {
+ if (j < 0 || j >= raid_disks || disk->rdev) {
printk("linear: disk numbering problem. Aborting!\n");
goto out;
}
@@ -145,8 +136,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
mddev->queue->max_sectors > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
- disk->size = rdev->size;
- conf->array_size += rdev->size;
+ disk->num_sectors = rdev->size * 2;
+ conf->array_sectors += rdev->size * 2;
cnt++;
}
@@ -155,34 +146,34 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
goto out;
}
- min_spacing = conf->array_size;
- sector_div(min_spacing, PAGE_SIZE/sizeof(struct dev_info *));
+ min_sectors = conf->array_sectors;
+ sector_div(min_sectors, PAGE_SIZE/sizeof(struct dev_info *));
- /* min_spacing is the minimum spacing that will fit the hash
+ /* min_sectors is the minimum spacing that will fit the hash
* table in one PAGE. This may be much smaller than needed.
* We find the smallest non-terminal set of consecutive devices
- * that is larger than min_spacing as use the size of that as
+ * that is larger than min_sectors and use the size of that as
* the actual spacing
*/
- conf->hash_spacing = conf->array_size;
+ conf->spacing = conf->array_sectors;
for (i=0; i < cnt-1 ; i++) {
- sector_t sz = 0;
+ sector_t tmp = 0;
int j;
- for (j = i; j < cnt - 1 && sz < min_spacing; j++)
- sz += conf->disks[j].size;
- if (sz >= min_spacing && sz < conf->hash_spacing)
- conf->hash_spacing = sz;
+ for (j = i; j < cnt - 1 && tmp < min_sectors; j++)
+ tmp += conf->disks[j].num_sectors;
+ if (tmp >= min_sectors && tmp < conf->spacing)
+ conf->spacing = tmp;
}
- /* hash_spacing may be too large for sector_div to work with,
+ /* spacing may be too large for sector_div to work with,
* so we might need to pre-shift
*/
- conf->preshift = 0;
+ conf->sector_shift = 0;
if (sizeof(sector_t) > sizeof(u32)) {
- sector_t space = conf->hash_spacing;
+ sector_t space = conf->spacing;
while (space > (sector_t)(~(u32)0)) {
space >>= 1;
- conf->preshift++;
+ conf->sector_shift++;
}
}
/*
@@ -194,9 +185,9 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
unsigned round;
unsigned long base;
- sz = conf->array_size >> conf->preshift;
+ sz = conf->array_sectors >> conf->sector_shift;
sz += 1; /* force round-up */
- base = conf->hash_spacing >> conf->preshift;
+ base = conf->spacing >> conf->sector_shift;
round = sector_div(sz, base);
nb_zone = sz + (round ? 1 : 0);
}
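
The renamed spacing/sector_shift fields exist because sector_div() needs a divisor that fits in 32 bits, so both the array size and the spacing are shifted right by sector_shift before the division. A worked example with invented sizes (only the shift-then-divide logic mirrors linear_conf()):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t array_sectors = 10ULL << 32;	/* large enough to need a shift */
	uint64_t spacing = 6ULL << 30;
	uint64_t space = spacing;
	unsigned int shift = 0;

	/* shrink a copy of spacing until it fits in 32 bits, as sector_div()
	 * requires a 32-bit divisor */
	while (space > 0xffffffffULL) {
		space >>= 1;
		shift++;
	}

	uint64_t sz = (array_sectors >> shift) + 1;	/* force round-up */
	uint64_t base = spacing >> shift;
	uint64_t nb_zone = sz / base + (sz % base ? 1 : 0);

	printf("sector_shift=%u nb_zone=%llu\n",
	       shift, (unsigned long long)nb_zone);
	return 0;
}
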
@@ -211,32 +202,31 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
* Here we generate the linear hash table
* First calculate the device offsets.
*/
- conf->disks[0].offset = 0;
+ conf->disks[0].start_sector = 0;
for (i = 1; i < raid_disks; i++)
- conf->disks[i].offset =
- conf->disks[i-1].offset +
- conf->disks[i-1].size;
+ conf->disks[i].start_sector =
+ conf->disks[i-1].start_sector +
+ conf->disks[i-1].num_sectors;
table = conf->hash_table;
- curr_offset = 0;
i = 0;
- for (curr_offset = 0;
- curr_offset < conf->array_size;
- curr_offset += conf->hash_spacing) {
+ for (curr_sector = 0;
+ curr_sector < conf->array_sectors;
+ curr_sector += conf->spacing) {
while (i < raid_disks-1 &&
- curr_offset >= conf->disks[i+1].offset)
+ curr_sector >= conf->disks[i+1].start_sector)
i++;
*table ++ = conf->disks + i;
}
- if (conf->preshift) {
- conf->hash_spacing >>= conf->preshift;
- /* round hash_spacing up so that when we divide by it,
+ if (conf->sector_shift) {
+ conf->spacing >>= conf->sector_shift;
+ /* round spacing up so that when we divide by it,
* we err on the side of "too-low", which is safest.
*/
- conf->hash_spacing++;
+ conf->spacing++;
}
BUG_ON(table - conf->hash_table > nb_zone);
@@ -258,7 +248,7 @@ static int linear_run (mddev_t *mddev)
if (!conf)
return 1;
mddev->private = conf;
- mddev->array_size = conf->array_size;
+ mddev->array_sectors = conf->array_sectors;
blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
mddev->queue->unplug_fn = linear_unplug;
@@ -292,8 +282,8 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
newconf->prev = mddev_to_conf(mddev);
mddev->private = newconf;
mddev->raid_disks++;
- mddev->array_size = newconf->array_size;
- set_capacity(mddev->gendisk, mddev->array_size << 1);
+ mddev->array_sectors = newconf->array_sectors;
+ set_capacity(mddev->gendisk, mddev->array_sectors);
return 0;
}
@@ -317,40 +307,47 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
const int rw = bio_data_dir(bio);
mddev_t *mddev = q->queuedata;
dev_info_t *tmp_dev;
- sector_t block;
+ int cpu;
if (unlikely(bio_barrier(bio))) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
- disk_stat_inc(mddev->gendisk, ios[rw]);
- disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
tmp_dev = which_dev(mddev, bio->bi_sector);
- block = bio->bi_sector >> 1;
- if (unlikely(block >= (tmp_dev->size + tmp_dev->offset)
- || block < tmp_dev->offset)) {
+ if (unlikely(bio->bi_sector >= (tmp_dev->num_sectors +
+ tmp_dev->start_sector)
+ || (bio->bi_sector <
+ tmp_dev->start_sector))) {
char b[BDEVNAME_SIZE];
- printk("linear_make_request: Block %llu out of bounds on "
- "dev %s size %llu offset %llu\n",
- (unsigned long long)block,
+ printk("linear_make_request: Sector %llu out of bounds on "
+ "dev %s: %llu sectors, offset %llu\n",
+ (unsigned long long)bio->bi_sector,
bdevname(tmp_dev->rdev->bdev, b),
- (unsigned long long)tmp_dev->size,
- (unsigned long long)tmp_dev->offset);
+ (unsigned long long)tmp_dev->num_sectors,
+ (unsigned long long)tmp_dev->start_sector);
bio_io_error(bio);
return 0;
}
if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
- (tmp_dev->offset + tmp_dev->size)<<1)) {
+ tmp_dev->start_sector + tmp_dev->num_sectors)) {
/* This bio crosses a device boundary, so we have to
* split it.
*/
struct bio_pair *bp;
- bp = bio_split(bio, bio_split_pool,
- ((tmp_dev->offset + tmp_dev->size)<<1) - bio->bi_sector);
+
+ bp = bio_split(bio,
+ tmp_dev->start_sector + tmp_dev->num_sectors
+ - bio->bi_sector);
+
if (linear_make_request(q, &bp->bio1))
generic_make_request(&bp->bio1);
if (linear_make_request(q, &bp->bio2))
@@ -360,7 +357,8 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
}
bio->bi_bdev = tmp_dev->rdev->bdev;
- bio->bi_sector = bio->bi_sector - (tmp_dev->offset << 1) + tmp_dev->rdev->data_offset;
+ bio->bi_sector = bio->bi_sector - tmp_dev->start_sector
+ + tmp_dev->rdev->data_offset;
return 1;
}
@@ -368,29 +366,6 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
static void linear_status (struct seq_file *seq, mddev_t *mddev)
{
-#undef MD_DEBUG
-#ifdef MD_DEBUG
- int j;
- linear_conf_t *conf = mddev_to_conf(mddev);
- sector_t s = 0;
-
- seq_printf(seq, " ");
- for (j = 0; j < mddev->raid_disks; j++)
- {
- char b[BDEVNAME_SIZE];
- s += conf->smallest_size;
- seq_printf(seq, "[%s",
- bdevname(conf->hash_table[j][0].rdev->bdev,b));
-
- while (s > conf->hash_table[j][0].offset +
- conf->hash_table[j][0].size)
- seq_printf(seq, "/%s] ",
- bdevname(conf->hash_table[j][1].rdev->bdev,b));
- else
- seq_printf(seq, "] ");
- }
- seq_printf(seq, "\n");
-#endif
seq_printf(seq, " %dk rounding", mddev->chunk_size/1024);
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2580ac1b9b0f..c1a837ca193c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -32,31 +32,21 @@
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/kthread.h>
-#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
-#include <linux/mutex.h>
#include <linux/ctype.h>
-#include <linux/freezer.h>
-
-#include <linux/init.h>
-
+#include <linux/hdreg.h>
+#include <linux/proc_fs.h>
+#include <linux/random.h>
+#include <linux/reboot.h>
#include <linux/file.h>
-
-#ifdef CONFIG_KMOD
-#include <linux/kmod.h>
-#endif
-
-#include <asm/unaligned.h>
+#include <linux/delay.h>
#define MAJOR_NR MD_MAJOR
-#define MD_DRIVER
/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6
@@ -66,7 +56,7 @@
#ifndef MODULE
-static void autostart_arrays (int part);
+static void autostart_arrays(int part);
#endif
static LIST_HEAD(pers_list);
@@ -169,7 +159,6 @@ void md_new_event(mddev_t *mddev)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
- sysfs_notify(&mddev->kobj, NULL, "sync_action");
}
EXPORT_SYMBOL_GPL(md_new_event);
@@ -213,7 +202,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
)
-static int md_fail_request (struct request_queue *q, struct bio *bio)
+static int md_fail_request(struct request_queue *q, struct bio *bio)
{
bio_io_error(bio);
return 0;
@@ -274,10 +263,12 @@ static mddev_t * mddev_find(dev_t unit)
INIT_LIST_HEAD(&new->all_mddevs);
init_timer(&new->safemode_timer);
atomic_set(&new->active, 1);
+ atomic_set(&new->openers, 0);
spin_lock_init(&new->write_lock);
init_waitqueue_head(&new->sb_wait);
init_waitqueue_head(&new->recovery_wait);
new->reshape_position = MaxSector;
+ new->resync_min = 0;
new->resync_max = MaxSector;
new->level = LEVEL_NONE;
@@ -347,21 +338,20 @@ static struct mdk_personality *find_pers(int level, char *clevel)
return NULL;
}
+/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
- sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
- return MD_NEW_SIZE_BLOCKS(size);
+ sector_t num_sectors = bdev->bd_inode->i_size / 512;
+ return MD_NEW_SIZE_SECTORS(num_sectors);
}
-static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
+static sector_t calc_num_sectors(mdk_rdev_t *rdev, unsigned chunk_size)
{
- sector_t size;
-
- size = rdev->sb_offset;
+ sector_t num_sectors = rdev->sb_start;
if (chunk_size)
- size &= ~((sector_t)chunk_size/1024 - 1);
- return size;
+ num_sectors &= ~((sector_t)chunk_size/512 - 1);
+ return num_sectors;
}
static int alloc_disk_sb(mdk_rdev_t * rdev)
@@ -372,7 +362,7 @@ static int alloc_disk_sb(mdk_rdev_t * rdev)
rdev->sb_page = alloc_page(GFP_KERNEL);
if (!rdev->sb_page) {
printk(KERN_ALERT "md: out of memory.\n");
- return -EINVAL;
+ return -ENOMEM;
}
return 0;
@@ -384,7 +374,7 @@ static void free_disk_sb(mdk_rdev_t * rdev)
put_page(rdev->sb_page);
rdev->sb_loaded = 0;
rdev->sb_page = NULL;
- rdev->sb_offset = 0;
+ rdev->sb_start = 0;
rdev->size = 0;
}
}
@@ -530,7 +520,7 @@ static int read_disk_sb(mdk_rdev_t * rdev, int size)
return 0;
- if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
+ if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
goto fail;
rdev->sb_loaded = 1;
return 0;
@@ -543,17 +533,12 @@ fail:
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
- if ( (sb1->set_uuid0 == sb2->set_uuid0) &&
- (sb1->set_uuid1 == sb2->set_uuid1) &&
- (sb1->set_uuid2 == sb2->set_uuid2) &&
- (sb1->set_uuid3 == sb2->set_uuid3))
-
- return 1;
-
- return 0;
+ return sb1->set_uuid0 == sb2->set_uuid0 &&
+ sb1->set_uuid1 == sb2->set_uuid1 &&
+ sb1->set_uuid2 == sb2->set_uuid2 &&
+ sb1->set_uuid3 == sb2->set_uuid3;
}
-
static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
int ret;
@@ -564,7 +549,7 @@ static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
if (!tmp1 || !tmp2) {
ret = 0;
- printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
+ printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
goto abort;
}
@@ -577,11 +562,7 @@ static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
tmp1->nr_disks = 0;
tmp2->nr_disks = 0;
- if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
- ret = 0;
- else
- ret = 1;
-
+ ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
kfree(tmp1);
kfree(tmp2);
@@ -658,11 +639,14 @@ static unsigned int calc_sb_csum(mdp_super_t * sb)
*/
struct super_type {
- char *name;
- struct module *owner;
- int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
- int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
- void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
+ char *name;
+ struct module *owner;
+ int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
+ int minor_version);
+ int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
+ void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
+ unsigned long long (*rdev_size_change)(mdk_rdev_t *rdev,
+ sector_t num_sectors);
};
/*
@@ -673,16 +657,14 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
mdp_super_t *sb;
int ret;
- sector_t sb_offset;
/*
- * Calculate the position of the superblock,
+ * Calculate the position of the superblock (512byte sectors),
* it's at the end of the disk.
*
* It also happens to be a multiple of 4Kb.
*/
- sb_offset = calc_dev_sboffset(rdev->bdev);
- rdev->sb_offset = sb_offset;
+ rdev->sb_start = calc_dev_sboffset(rdev->bdev);
ret = read_disk_sb(rdev, MD_SB_BYTES);
if (ret) return ret;
@@ -759,7 +741,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
else
ret = 0;
}
- rdev->size = calc_dev_size(rdev, sb->chunk_size);
+ rdev->size = calc_num_sectors(rdev, sb->chunk_size) / 2;
if (rdev->size < sb->size && sb->level > 1)
/* "this cannot possibly happen" ... */
@@ -1004,6 +986,26 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
}
/*
+ * rdev_size_change for 0.90.0
+ */
+static unsigned long long
+super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
+{
+ if (num_sectors && num_sectors < rdev->mddev->size * 2)
+ return 0; /* component must fit device */
+ if (rdev->mddev->bitmap_offset)
+ return 0; /* can't move bitmap */
+ rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+ if (!num_sectors || num_sectors > rdev->sb_start)
+ num_sectors = rdev->sb_start;
+ md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ rdev->sb_page);
+ md_super_wait(rdev->mddev);
+ return num_sectors / 2; /* kB for sysfs */
+}
+
+
+/*
* version 1 superblock
*/
@@ -1034,12 +1036,12 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
struct mdp_superblock_1 *sb;
int ret;
- sector_t sb_offset;
+ sector_t sb_start;
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
int bmask;
/*
- * Calculate the position of the superblock.
+ * Calculate the position of the superblock in 512byte sectors.
* It is always aligned to a 4K boundary and
 * depending on minor_version, it can be:
* 0: At least 8K, but less than 12K, from end of device
@@ -1048,22 +1050,20 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
*/
switch(minor_version) {
case 0:
- sb_offset = rdev->bdev->bd_inode->i_size >> 9;
- sb_offset -= 8*2;
- sb_offset &= ~(sector_t)(4*2-1);
- /* convert from sectors to K */
- sb_offset /= 2;
+ sb_start = rdev->bdev->bd_inode->i_size >> 9;
+ sb_start -= 8*2;
+ sb_start &= ~(sector_t)(4*2-1);
break;
case 1:
- sb_offset = 0;
+ sb_start = 0;
break;
case 2:
- sb_offset = 4;
+ sb_start = 8;
break;
default:
return -EINVAL;
}
- rdev->sb_offset = sb_offset;
+ rdev->sb_start = sb_start;
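
For minor_version 0 the superblock sits at least 8KiB before the end of the device, aligned down to an 8-sector boundary, and after this patch the offset is kept in 512-byte sectors rather than KiB. A worked example with an invented device size:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dev_bytes = 500107862016ULL;	/* e.g. a ~500GB disk */
	uint64_t sb_start;

	sb_start = dev_bytes >> 9;		/* device size in 512-byte sectors */
	sb_start -= 8 * 2;			/* back off at least 8KiB */
	sb_start &= ~(uint64_t)(4 * 2 - 1);	/* align down to 4KiB (8 sectors) */

	printf("superblock starts at sector %llu\n",
	       (unsigned long long)sb_start);
	return 0;
}
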
/* superblock is rarely larger than 1K, but it can be larger,
* and it is safe to read 4k, so we do that
@@ -1077,7 +1077,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
sb->major_version != cpu_to_le32(1) ||
le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
- le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
+ le64_to_cpu(sb->super_offset) != rdev->sb_start ||
(le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
return -EINVAL;
@@ -1113,7 +1113,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
if (minor_version
- && rdev->data_offset < sb_offset + (rdev->sb_size/512))
+ && rdev->data_offset < sb_start + (rdev->sb_size/512))
return -EINVAL;
if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
@@ -1149,7 +1149,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
if (minor_version)
rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
else
- rdev->size = rdev->sb_offset;
+ rdev->size = rdev->sb_start / 2;
if (rdev->size < le64_to_cpu(sb->data_size)/2)
return -EINVAL;
rdev->size = le64_to_cpu(sb->data_size)/2;
@@ -1328,35 +1328,74 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->sb_csum = calc_sb_1_csum(sb);
}
+static unsigned long long
+super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
+{
+ struct mdp_superblock_1 *sb;
+ sector_t max_sectors;
+ if (num_sectors && num_sectors < rdev->mddev->size * 2)
+ return 0; /* component must fit device */
+ if (rdev->sb_start < rdev->data_offset) {
+ /* minor versions 1 and 2; superblock before data */
+ max_sectors = rdev->bdev->bd_inode->i_size >> 9;
+ max_sectors -= rdev->data_offset;
+ if (!num_sectors || num_sectors > max_sectors)
+ num_sectors = max_sectors;
+ } else if (rdev->mddev->bitmap_offset) {
+ /* minor version 0 with bitmap we can't move */
+ return 0;
+ } else {
+ /* minor version 0; superblock after data */
+ sector_t sb_start;
+ sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
+ sb_start &= ~(sector_t)(4*2 - 1);
+ max_sectors = rdev->size * 2 + sb_start - rdev->sb_start;
+ if (!num_sectors || num_sectors > max_sectors)
+ num_sectors = max_sectors;
+ rdev->sb_start = sb_start;
+ }
+ sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
+ sb->data_size = cpu_to_le64(num_sectors);
+ sb->super_offset = rdev->sb_start;
+ sb->sb_csum = calc_sb_1_csum(sb);
+ md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ rdev->sb_page);
+ md_super_wait(rdev->mddev);
+ return num_sectors / 2; /* kB for sysfs */
+}
static struct super_type super_types[] = {
[0] = {
.name = "0.90.0",
.owner = THIS_MODULE,
- .load_super = super_90_load,
- .validate_super = super_90_validate,
- .sync_super = super_90_sync,
+ .load_super = super_90_load,
+ .validate_super = super_90_validate,
+ .sync_super = super_90_sync,
+ .rdev_size_change = super_90_rdev_size_change,
},
[1] = {
.name = "md-1",
.owner = THIS_MODULE,
- .load_super = super_1_load,
- .validate_super = super_1_validate,
- .sync_super = super_1_sync,
+ .load_super = super_1_load,
+ .validate_super = super_1_validate,
+ .sync_super = super_1_sync,
+ .rdev_size_change = super_1_rdev_size_change,
},
};
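
The new rdev_size_change() method is reached through this table, indexed by the array's major_version (see the rdev_size_store() hunk further down). A toy, compilable model of the dispatch pattern (the format names and resize bodies are invented; only the table dispatch mirrors md.c):

#include <stdio.h>

struct super_ops {
	const char *name;
	unsigned long long (*rdev_size_change)(unsigned long long num_sectors);
};

static unsigned long long v090_resize(unsigned long long s) { return s / 2; }
static unsigned long long v1_resize(unsigned long long s)   { return s / 2; }

static const struct super_ops supers[] = {
	[0] = { "0.90.0", v090_resize },
	[1] = { "md-1",   v1_resize },
};

int main(void)
{
	int major_version = 1;
	unsigned long long kb;

	/* shape of: super_types[major_version].rdev_size_change(rdev, sectors) */
	kb = supers[major_version].rdev_size_change(2048);
	printf("%s: resized, %llu KiB reported to sysfs\n",
	       supers[major_version].name, kb);
	return 0;
}
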
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
- struct list_head *tmp, *tmp2;
mdk_rdev_t *rdev, *rdev2;
- rdev_for_each(rdev, tmp, mddev1)
- rdev_for_each(rdev2, tmp2, mddev2)
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev1)
+ rdev_for_each_rcu(rdev2, mddev2)
if (rdev->bdev->bd_contains ==
- rdev2->bdev->bd_contains)
+ rdev2->bdev->bd_contains) {
+ rcu_read_unlock();
return 1;
-
+ }
+ rcu_read_unlock();
return 0;
}
@@ -1415,15 +1454,12 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
- if (rdev->bdev->bd_part)
- ko = &rdev->bdev->bd_part->dev.kobj;
- else
- ko = &rdev->bdev->bd_disk->dev.kobj;
+ ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
kobject_del(&rdev->kobj);
goto fail;
}
- list_add(&rdev->same_set, &mddev->disks);
+ list_add_rcu(&rdev->same_set, &mddev->disks);
bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
return 0;
@@ -1448,14 +1484,16 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
return;
}
bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
- list_del_init(&rdev->same_set);
+ list_del_rcu(&rdev->same_set);
printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
/* We need to delay this, otherwise we can deadlock when
- * writing to 'remove' to "dev/state"
+ * writing to 'remove' to "dev/state". We also need
+ * to delay it due to rcu usage.
*/
+ synchronize_rcu();
INIT_WORK(&rdev->del_work, md_delayed_delete);
kobject_get(&rdev->kobj);
schedule_work(&rdev->del_work);
@@ -1482,7 +1520,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
if (err) {
printk(KERN_ERR "md: could not bd_claim %s.\n",
bdevname(bdev, b));
- blkdev_put(bdev);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
return err;
}
if (!shared)
@@ -1498,7 +1536,7 @@ static void unlock_rdev(mdk_rdev_t *rdev)
if (!bdev)
MD_BUG();
bd_release(bdev);
- blkdev_put(bdev);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
}
void md_autodetect_dev(dev_t dev);
@@ -1511,7 +1549,6 @@ static void export_rdev(mdk_rdev_t * rdev)
if (rdev->mddev)
MD_BUG();
free_disk_sb(rdev);
- list_del_init(&rdev->same_set);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
@@ -1758,11 +1795,11 @@ repeat:
dprintk("%s ", bdevname(rdev->bdev,b));
if (!test_bit(Faulty, &rdev->flags)) {
md_super_write(mddev,rdev,
- rdev->sb_offset<<1, rdev->sb_size,
+ rdev->sb_start, rdev->sb_size,
rdev->sb_page);
dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
bdevname(rdev->bdev,b),
- (unsigned long long)rdev->sb_offset);
+ (unsigned long long)rdev->sb_start);
rdev->sb_events = mddev->events;
} else
@@ -1787,7 +1824,7 @@ repeat:
}
-/* words written to sysfs files may, or my not, be \n terminated.
+/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case. For this we use cmd_match.
*/
static int cmd_match(const char *cmd, const char *str)
@@ -1886,6 +1923,8 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
err = 0;
}
+ if (!err)
+ sysfs_notify(&rdev->kobj, NULL, "state");
return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
@@ -1931,7 +1970,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
slot = -1;
else if (e==buf || (*e && *e!= '\n'))
return -EINVAL;
- if (rdev->mddev->pers) {
+ if (rdev->mddev->pers && slot == -1) {
/* Setting 'slot' on an active array requires also
* updating the 'rd%d' link, and communicating
* with the personality with ->hot_*_disk.
@@ -1939,8 +1978,6 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
* failed/spare devices. This normally happens automatically,
* but not when the metadata is externally managed.
*/
- if (slot != -1)
- return -EBUSY;
if (rdev->raid_disk == -1)
return -EEXIST;
/* personality does all needed checks */
@@ -1954,6 +1991,43 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
sysfs_remove_link(&rdev->mddev->kobj, nm);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
+ } else if (rdev->mddev->pers) {
+ mdk_rdev_t *rdev2;
+ struct list_head *tmp;
+ /* Activating a spare .. or possibly reactivating
+ * if we ever get bitmaps working here.
+ */
+
+ if (rdev->raid_disk != -1)
+ return -EBUSY;
+
+ if (rdev->mddev->pers->hot_add_disk == NULL)
+ return -EINVAL;
+
+ rdev_for_each(rdev2, tmp, rdev->mddev)
+ if (rdev2->raid_disk == slot)
+ return -EEXIST;
+
+ rdev->raid_disk = slot;
+ if (test_bit(In_sync, &rdev->flags))
+ rdev->saved_raid_disk = slot;
+ else
+ rdev->saved_raid_disk = -1;
+ err = rdev->mddev->pers->
+ hot_add_disk(rdev->mddev, rdev);
+ if (err) {
+ rdev->raid_disk = -1;
+ return err;
+ } else
+ sysfs_notify(&rdev->kobj, NULL, "state");
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
+ printk(KERN_WARNING
+ "md: cannot register "
+ "%s for %s\n",
+ nm, mdname(rdev->mddev));
+
+ /* don't wakeup anyone, leave that to userspace. */
} else {
if (slot >= rdev->mddev->raid_disks)
return -ENOSPC;
@@ -1962,6 +2036,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
clear_bit(Faulty, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
set_bit(In_sync, &rdev->flags);
+ sysfs_notify(&rdev->kobj, NULL, "state");
}
return len;
}
@@ -1983,7 +2058,7 @@ offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
unsigned long long offset = simple_strtoull(buf, &e, 10);
if (e==buf || (*e && *e != '\n'))
return -EINVAL;
- if (rdev->mddev->pers)
+ if (rdev->mddev->pers && rdev->raid_disk >= 0)
return -EBUSY;
if (rdev->size && rdev->mddev->external)
/* Must set offset before size, so overlap checks
@@ -2015,17 +2090,28 @@ static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
static ssize_t
rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
- char *e;
- unsigned long long size = simple_strtoull(buf, &e, 10);
+ unsigned long long size;
unsigned long long oldsize = rdev->size;
mddev_t *my_mddev = rdev->mddev;
- if (e==buf || (*e && *e != '\n'))
+ if (strict_strtoull(buf, 10, &size) < 0)
return -EINVAL;
- if (my_mddev->pers)
- return -EBUSY;
+ if (my_mddev->pers && rdev->raid_disk >= 0) {
+ if (my_mddev->persistent) {
+ size = super_types[my_mddev->major_version].
+ rdev_size_change(rdev, size * 2);
+ if (!size)
+ return -EBUSY;
+ } else if (!size) {
+ size = (rdev->bdev->bd_inode->i_size >> 10);
+ size -= rdev->data_offset/2;
+ }
+ }
+ if (size < my_mddev->size)
+ return -EINVAL; /* component must fit device */
+
rdev->size = size;
- if (size > oldsize && rdev->mddev->external) {
+ if (size > oldsize && my_mddev->external) {
/* need to check that all other rdevs with the same ->bdev
* do not overlap. We need to unlock the mddev to avoid
* a deadlock. We have already changed rdev->size, and if
@@ -2044,8 +2130,9 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
if (test_bit(AllReserved, &rdev2->flags) ||
(rdev->bdev == rdev2->bdev &&
rdev != rdev2 &&
- overlaps(rdev->data_offset, rdev->size,
- rdev2->data_offset, rdev2->size))) {
+ overlaps(rdev->data_offset, rdev->size * 2,
+ rdev2->data_offset,
+ rdev2->size * 2))) {
overlap = 1;
break;
}
@@ -2067,8 +2154,6 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
return -EBUSY;
}
}
- if (size < my_mddev->size || my_mddev->size == 0)
- my_mddev->size = size;
return len;
}
@@ -2293,6 +2378,8 @@ static void analyze_sbs(mddev_t * mddev)
}
+static void md_safemode_timeout(unsigned long data);
+
static ssize_t
safe_delay_show(mddev_t *mddev, char *page)
{
@@ -2307,12 +2394,11 @@ safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
int i;
unsigned long msec;
char buf[30];
- char *e;
+
/* remove a period, and count digits after it */
if (len >= sizeof(buf))
return -EINVAL;
- strlcpy(buf, cbuf, len);
- buf[len] = 0;
+ strlcpy(buf, cbuf, sizeof(buf));
for (i=0; i<len; i++) {
if (dot) {
if (isdigit(buf[i])) {
@@ -2325,16 +2411,18 @@ safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
buf[i] = 0;
}
}
- msec = simple_strtoul(buf, &e, 10);
- if (e == buf || (*e && *e != '\n'))
+ if (strict_strtoul(buf, 10, &msec) < 0)
return -EINVAL;
msec = (msec * 1000) / scale;
if (msec == 0)
mddev->safemode_delay = 0;
else {
+ unsigned long old_delay = mddev->safemode_delay;
mddev->safemode_delay = (msec*HZ)/1000;
if (mddev->safemode_delay == 0)
mddev->safemode_delay = 1;
+ if (mddev->safemode_delay < old_delay)
+ md_safemode_timeout((unsigned long)mddev);
}
return len;
}
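
safe_mode_delay is written as seconds with up to three fractional digits; the parser above strips the period, counts the digits to build a scale, and converts to jiffies with a floor of one. A worked example of the conversion, assuming HZ=250 purely for illustration:

#include <stdio.h>

#define HZ 250	/* assumed for the example; the real value is a config option */

int main(void)
{
	unsigned long digits = 200;	/* "0.200" after stripping the '.' */
	unsigned long scale = 1000;	/* 10 per fractional digit collected */
	unsigned long msec, delay;

	msec = (digits * 1000) / scale;	/* 200 msec */
	delay = (msec * HZ) / 1000;	/* 50 jiffies at HZ=250 */
	if (delay == 0)
		delay = 1;		/* never round a small delay to zero */

	printf("safemode_delay = %lu jiffies\n", delay);
	return 0;
}
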
@@ -2512,7 +2600,7 @@ __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
* When written, doesn't tear down array, but just stops it
* suspended (not supported yet)
* All IO requests will block. The array can be reconfigured.
- * Writing this, if accepted, will block until array is quiessent
+ * Writing this, if accepted, will block until array is quiescent
* readonly
* no resync can happen. no superblocks get written.
* write requests fail
@@ -2585,7 +2673,7 @@ array_state_show(mddev_t *mddev, char *page)
return sprintf(page, "%s\n", array_states[st]);
}
-static int do_md_stop(mddev_t * mddev, int ro);
+static int do_md_stop(mddev_t * mddev, int ro, int is_open);
static int do_md_run(mddev_t * mddev);
static int restart_array(mddev_t *mddev);
@@ -2599,16 +2687,16 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
break;
case clear:
/* stopping an active array */
- if (atomic_read(&mddev->active) > 1)
+ if (atomic_read(&mddev->openers) > 0)
return -EBUSY;
- err = do_md_stop(mddev, 0);
+ err = do_md_stop(mddev, 0, 0);
break;
case inactive:
/* stopping an active array */
if (mddev->pers) {
- if (atomic_read(&mddev->active) > 1)
+ if (atomic_read(&mddev->openers) > 0)
return -EBUSY;
- err = do_md_stop(mddev, 2);
+ err = do_md_stop(mddev, 2, 0);
} else
err = 0; /* already inactive */
break;
@@ -2616,7 +2704,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
break; /* not supported yet */
case readonly:
if (mddev->pers)
- err = do_md_stop(mddev, 1);
+ err = do_md_stop(mddev, 1, 0);
else {
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
@@ -2625,9 +2713,9 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
break;
case read_auto:
if (mddev->pers) {
- if (mddev->ro != 1)
- err = do_md_stop(mddev, 1);
- else
+ if (mddev->ro == 0)
+ err = do_md_stop(mddev, 1, 0);
+ else if (mddev->ro == 1)
err = restart_array(mddev);
if (err == 0) {
mddev->ro = 2;
@@ -2681,8 +2769,10 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
}
if (err)
return err;
- else
+ else {
+ sysfs_notify(&mddev->kobj, NULL, "array_state");
return len;
+ }
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
@@ -2785,7 +2875,7 @@ size_show(mddev_t *mddev, char *page)
return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
}
-static int update_size(mddev_t *mddev, unsigned long size);
+static int update_size(mddev_t *mddev, sector_t num_sectors);
static ssize_t
size_store(mddev_t *mddev, const char *buf, size_t len)
@@ -2802,7 +2892,7 @@ size_store(mddev_t *mddev, const char *buf, size_t len)
return -EINVAL;
if (mddev->pers) {
- err = update_size(mddev, size);
+ err = update_size(mddev, size * 2);
md_update_sb(mddev, 1);
} else {
if (mddev->size == 0 ||
@@ -2841,7 +2931,13 @@ metadata_store(mddev_t *mddev, const char *buf, size_t len)
{
int major, minor;
char *e;
- if (!list_empty(&mddev->disks))
+ /* Changing the details of 'external' metadata is
+ * always permitted. Otherwise there must be
+ * no devices attached to the array.
+ */
+ if (mddev->external && strncmp(buf, "external:", 9) == 0)
+ ;
+ else if (!list_empty(&mddev->disks))
return -EBUSY;
if (cmd_match(buf, "none")) {
@@ -2899,7 +2995,7 @@ action_show(mddev_t *mddev, char *page)
type = "check";
else
type = "repair";
- } else
+ } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
type = "recover";
}
return sprintf(page, "%s\n", type);
@@ -2921,15 +3017,19 @@ action_store(mddev_t *mddev, const char *page, size_t len)
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return -EBUSY;
- else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
+ else if (cmd_match(page, "resync"))
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ else if (cmd_match(page, "recover")) {
+ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- else if (cmd_match(page, "reshape")) {
+ } else if (cmd_match(page, "reshape")) {
int err;
if (mddev->pers->start_reshape == NULL)
return -EINVAL;
err = mddev->pers->start_reshape(mddev);
if (err)
return err;
+ sysfs_notify(&mddev->kobj, NULL, "degraded");
} else {
if (cmd_match(page, "check"))
set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -2940,6 +3040,7 @@ action_store(mddev_t *mddev, const char *page, size_t len)
}
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
+ sysfs_notify(&mddev->kobj, NULL, "sync_action");
return len;
}
@@ -3049,11 +3150,11 @@ static ssize_t
sync_speed_show(mddev_t *mddev, char *page)
{
unsigned long resync, dt, db;
- resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
- dt = ((jiffies - mddev->resync_mark) / HZ);
+ resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
+ dt = (jiffies - mddev->resync_mark) / HZ;
if (!dt) dt++;
- db = resync - (mddev->resync_mark_cnt);
- return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
+ db = resync - mddev->resync_mark_cnt;
+ return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}
static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
@@ -3075,6 +3176,36 @@ sync_completed_show(mddev_t *mddev, char *page)
static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
static ssize_t
+min_sync_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%llu\n",
+ (unsigned long long)mddev->resync_min);
+}
+static ssize_t
+min_sync_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ unsigned long long min;
+ if (strict_strtoull(buf, 10, &min))
+ return -EINVAL;
+ if (min > mddev->resync_max)
+ return -EINVAL;
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ return -EBUSY;
+
+ /* Must be a multiple of chunk_size */
+ if (mddev->chunk_size) {
+ if (min & (sector_t)((mddev->chunk_size>>9)-1))
+ return -EINVAL;
+ }
+ mddev->resync_min = min;
+
+ return len;
+}
+
+static struct md_sysfs_entry md_min_sync =
+__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
+
+static ssize_t
max_sync_show(mddev_t *mddev, char *page)
{
if (mddev->resync_max == MaxSector)
@@ -3089,9 +3220,10 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len)
if (strncmp(buf, "max", 3) == 0)
mddev->resync_max = MaxSector;
else {
- char *ep;
- unsigned long long max = simple_strtoull(buf, &ep, 10);
- if (ep == buf || (*ep != 0 && *ep != '\n'))
+ unsigned long long max;
+ if (strict_strtoull(buf, 10, &max))
+ return -EINVAL;
+ if (max < mddev->resync_min)
return -EINVAL;
if (max < mddev->resync_max &&
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
@@ -3222,6 +3354,7 @@ static struct attribute *md_redundancy_attrs[] = {
&md_sync_speed.attr,
&md_sync_force_parallel.attr,
&md_sync_completed.attr,
+ &md_min_sync.attr,
&md_max_sync.attr,
&md_suspend_lo.attr,
&md_suspend_hi.attr,
@@ -3326,9 +3459,9 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
disk->queue = mddev->queue;
add_disk(disk);
mddev->gendisk = disk;
+ error = kobject_init_and_add(&mddev->kobj, &md_ktype,
+ &disk_to_dev(disk)->kobj, "%s", "md");
mutex_unlock(&disks_mutex);
- error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
- "%s", "md");
if (error)
printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
disk->disk_name);
@@ -3341,7 +3474,11 @@ static void md_safemode_timeout(unsigned long data)
{
mddev_t *mddev = (mddev_t *) data;
- mddev->safemode = 1;
+ if (!atomic_read(&mddev->writes_pending)) {
+ mddev->safemode = 1;
+ if (mddev->external)
+ set_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags);
+ }
md_wakeup_thread(mddev->thread);
}
@@ -3382,17 +3519,12 @@ static int do_md_run(mddev_t * mddev)
return -EINVAL;
}
/*
- * chunk-size has to be a power of 2 and multiples of PAGE_SIZE
+ * chunk-size has to be a power of 2
*/
if ( (1 << ffz(~chunk_size)) != chunk_size) {
printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
return -EINVAL;
}
- if (chunk_size < PAGE_SIZE) {
- printk(KERN_ERR "too small chunk_size: %d < %ld\n",
- chunk_size, PAGE_SIZE);
- return -EINVAL;
- }
/* devices must have minimum size of one chunk */
rdev_for_each(rdev, tmp, mddev) {
@@ -3410,12 +3542,10 @@ static int do_md_run(mddev_t * mddev)
}
}
-#ifdef CONFIG_KMOD
if (mddev->level != LEVEL_NONE)
request_module("md-level-%d", mddev->level);
else if (mddev->clevel[0])
request_module("md-%s", mddev->clevel);
-#endif
/*
* Drop all container device buffers, from now on
@@ -3432,22 +3562,23 @@ static int do_md_run(mddev_t * mddev)
* We don't want the data to overlap the metadata,
 * Internal Bitmap issues have been handled elsewhere.
*/
- if (rdev->data_offset < rdev->sb_offset) {
+ if (rdev->data_offset < rdev->sb_start) {
if (mddev->size &&
rdev->data_offset + mddev->size*2
- > rdev->sb_offset*2) {
+ > rdev->sb_start) {
printk("md: %s: data overlaps metadata\n",
mdname(mddev));
return -EINVAL;
}
} else {
- if (rdev->sb_offset*2 + rdev->sb_size/512
+ if (rdev->sb_start + rdev->sb_size/512
> rdev->data_offset) {
printk("md: %s: metadata overlaps data\n",
mdname(mddev));
return -EINVAL;
}
}
+ sysfs_notify(&rdev->kobj, NULL, "state");
}
md_probe(mddev->unit, NULL, NULL);
@@ -3519,7 +3650,9 @@ static int do_md_run(mddev_t * mddev)
mddev->ro = 2; /* read-only, but switch on first write */
err = mddev->pers->run(mddev);
- if (!err && mddev->pers->sync_request) {
+ if (err)
+ printk(KERN_ERR "md: pers->run() failed ...\n");
+ else if (mddev->pers->sync_request) {
err = bitmap_create(mddev);
if (err) {
printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
@@ -3528,7 +3661,6 @@ static int do_md_run(mddev_t * mddev)
}
}
if (err) {
- printk(KERN_ERR "md: pers->run() failed ...\n");
module_put(mddev->pers->owner);
mddev->pers = NULL;
bitmap_destroy(mddev);
@@ -3563,7 +3695,7 @@ static int do_md_run(mddev_t * mddev)
if (mddev->flags)
md_update_sb(mddev, 0);
- set_capacity(disk, mddev->array_size<<1);
+ set_capacity(disk, mddev->array_sectors);
/* If we call blk_queue_make_request here, it will
* re-initialise max_sectors etc which may have been
@@ -3608,45 +3740,35 @@ static int do_md_run(mddev_t * mddev)
mddev->changed = 1;
md_new_event(mddev);
- kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
+ sysfs_notify(&mddev->kobj, NULL, "array_state");
+ sysfs_notify(&mddev->kobj, NULL, "sync_action");
+ sysfs_notify(&mddev->kobj, NULL, "degraded");
+ kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
return 0;
}
static int restart_array(mddev_t *mddev)
{
struct gendisk *disk = mddev->gendisk;
- int err;
- /*
- * Complain if it has no devices
- */
- err = -ENXIO;
+ /* Complain if it has no devices */
if (list_empty(&mddev->disks))
- goto out;
-
- if (mddev->pers) {
- err = -EBUSY;
- if (!mddev->ro)
- goto out;
-
- mddev->safemode = 0;
- mddev->ro = 0;
- set_disk_ro(disk, 0);
-
- printk(KERN_INFO "md: %s switched to read-write mode.\n",
- mdname(mddev));
- /*
- * Kick recovery or resync if necessary
- */
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
- md_wakeup_thread(mddev->sync_thread);
- err = 0;
- } else
- err = -EINVAL;
-
-out:
- return err;
+ return -ENXIO;
+ if (!mddev->pers)
+ return -EINVAL;
+ if (!mddev->ro)
+ return -EBUSY;
+ mddev->safemode = 0;
+ mddev->ro = 0;
+ set_disk_ro(disk, 0);
+ printk(KERN_INFO "md: %s switched to read-write mode.\n",
+ mdname(mddev));
+ /* Kick recovery or resync if necessary */
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ md_wakeup_thread(mddev->sync_thread);
+ sysfs_notify(&mddev->kobj, NULL, "array_state");
+ return 0;
}
/* similar to deny_write_access, but accounts for our holding a reference
@@ -3680,16 +3802,17 @@ static void restore_bitmap_write_access(struct file *file)
* 1 - switch to readonly
* 2 - stop but do not disassemble array
*/
-static int do_md_stop(mddev_t * mddev, int mode)
+static int do_md_stop(mddev_t * mddev, int mode, int is_open)
{
int err = 0;
struct gendisk *disk = mddev->gendisk;
+ if (atomic_read(&mddev->openers) > is_open) {
+ printk("md: %s still in use.\n",mdname(mddev));
+ return -EBUSY;
+ }
+
if (mddev->pers) {
- if (atomic_read(&mddev->active)>2) {
- printk("md: %s still in use.\n",mdname(mddev));
- return -EBUSY;
- }
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -3700,8 +3823,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
del_timer_sync(&mddev->safemode_timer);
- invalidate_partition(disk, 0);
-
switch(mode) {
case 1: /* readonly */
err = -ENXIO;
@@ -3773,10 +3894,11 @@ static int do_md_stop(mddev_t * mddev, int mode)
export_array(mddev);
- mddev->array_size = 0;
+ mddev->array_sectors = 0;
mddev->size = 0;
mddev->raid_disks = 0;
mddev->recovery_cp = 0;
+ mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->reshape_position = MaxSector;
mddev->external = 0;
@@ -3811,6 +3933,7 @@ static int do_md_stop(mddev_t * mddev, int mode)
mdname(mddev));
err = 0;
md_new_event(mddev);
+ sysfs_notify(&mddev->kobj, NULL, "array_state");
out:
return err;
}
@@ -3833,10 +3956,10 @@ static void autorun_array(mddev_t *mddev)
}
printk("\n");
- err = do_md_run (mddev);
+ err = do_md_run(mddev);
if (err) {
printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
- do_md_stop (mddev, 0);
+ do_md_stop(mddev, 0, 0);
}
}
@@ -3927,8 +4050,10 @@ static void autorun_devices(int part)
/* on success, candidates will be empty, on error
* it won't...
*/
- rdev_for_each_list(rdev, tmp, candidates)
+ rdev_for_each_list(rdev, tmp, candidates) {
+ list_del_init(&rdev->same_set);
export_rdev(rdev);
+ }
mddev_put(mddev);
}
printk(KERN_INFO "md: ... autorun DONE.\n");
@@ -4009,9 +4134,11 @@ static int get_bitmap_file(mddev_t * mddev, void __user * arg)
char *ptr, *buf = NULL;
int err = -ENOMEM;
- md_allow_write(mddev);
+ if (md_allow_write(mddev))
+ file = kmalloc(sizeof(*file), GFP_NOIO);
+ else
+ file = kmalloc(sizeof(*file), GFP_KERNEL);
- file = kmalloc(sizeof(*file), GFP_KERNEL);
if (!file)
goto out;
@@ -4044,15 +4171,12 @@ out:
static int get_disk_info(mddev_t * mddev, void __user * arg)
{
mdu_disk_info_t info;
- unsigned int nr;
mdk_rdev_t *rdev;
if (copy_from_user(&info, arg, sizeof(info)))
return -EFAULT;
- nr = info.number;
-
- rdev = find_rdev_nr(mddev, nr);
+ rdev = find_rdev_nr(mddev, info.number);
if (rdev) {
info.major = MAJOR(rdev->bdev->bd_dev);
info.minor = MINOR(rdev->bdev->bd_dev);
@@ -4172,8 +4296,12 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
}
if (err)
export_rdev(rdev);
+ else
+ sysfs_notify(&rdev->kobj, NULL, "state");
md_update_sb(mddev, 1);
+ if (mddev->degraded)
+ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
return err;
@@ -4190,7 +4318,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
if (!(info->state & (1<<MD_DISK_FAULTY))) {
int err;
- rdev = md_import_device (dev, -1, 0);
+ rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
@@ -4212,10 +4340,10 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
if (!mddev->persistent) {
printk(KERN_INFO "md: nonpersistent superblock ...\n");
- rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
+ rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
} else
- rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
- rdev->size = calc_dev_size(rdev, mddev->chunk_size);
+ rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+ rdev->size = calc_num_sectors(rdev, mddev->chunk_size) / 2;
err = bind_rdev_to_array(rdev, mddev);
if (err) {
@@ -4232,9 +4360,6 @@ static int hot_remove_disk(mddev_t * mddev, dev_t dev)
char b[BDEVNAME_SIZE];
mdk_rdev_t *rdev;
- if (!mddev->pers)
- return -ENODEV;
-
rdev = find_rdev(mddev, dev);
if (!rdev)
return -ENXIO;
@@ -4257,7 +4382,6 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
int err;
- unsigned int size;
mdk_rdev_t *rdev;
if (!mddev->pers)
@@ -4276,7 +4400,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
return -EINVAL;
}
- rdev = md_import_device (dev, -1, 0);
+ rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
@@ -4285,13 +4409,11 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
}
if (mddev->persistent)
- rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
+ rdev->sb_start = calc_dev_sboffset(rdev->bdev);
else
- rdev->sb_offset =
- rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
+ rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
- size = calc_dev_size(rdev, mddev->chunk_size);
- rdev->size = size;
+ rdev->size = calc_num_sectors(rdev, mddev->chunk_size) / 2;
if (test_bit(Faulty, &rdev->flags)) {
printk(KERN_WARNING
@@ -4476,44 +4598,50 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
return 0;
}
-static int update_size(mddev_t *mddev, unsigned long size)
+static int update_size(mddev_t *mddev, sector_t num_sectors)
{
mdk_rdev_t * rdev;
int rv;
struct list_head *tmp;
- int fit = (size == 0);
+ int fit = (num_sectors == 0);
if (mddev->pers->resize == NULL)
return -EINVAL;
- /* The "size" is the amount of each device that is used.
- * This can only make sense for arrays with redundancy.
- * linear and raid0 always use whatever space is available
- * We can only consider changing the size if no resync
- * or reconstruction is happening, and if the new size
- * is acceptable. It must fit before the sb_offset or,
- * if that is <data_offset, it must fit before the
- * size of each device.
- * If size is zero, we find the largest size that fits.
+ /* The "num_sectors" is the number of sectors of each device that
+ * is used. This can only make sense for arrays with redundancy.
+ * linear and raid0 always use whatever space is available. We can only
+ * consider changing this number if no resync or reconstruction is
+ * happening, and if the new size is acceptable. It must fit before the
+ * sb_start or, if that is <data_offset, it must fit before the size
+ * of each device. If num_sectors is zero, we find the largest size
+ * that fits.
*/
if (mddev->sync_thread)
return -EBUSY;
+ if (mddev->bitmap)
+ /* Sorry, cannot grow a bitmap yet, just remove it,
+ * grow, and re-add.
+ */
+ return -EBUSY;
rdev_for_each(rdev, tmp, mddev) {
sector_t avail;
avail = rdev->size * 2;
- if (fit && (size == 0 || size > avail/2))
- size = avail/2;
- if (avail < ((sector_t)size << 1))
+ if (fit && (num_sectors == 0 || num_sectors > avail))
+ num_sectors = avail;
+ if (avail < num_sectors)
return -ENOSPC;
}
- rv = mddev->pers->resize(mddev, (sector_t)size *2);
+ rv = mddev->pers->resize(mddev, num_sectors);
if (!rv) {
struct block_device *bdev;
bdev = bdget_disk(mddev->gendisk, 0);
if (bdev) {
mutex_lock(&bdev->bd_inode->i_mutex);
- i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
+ i_size_write(bdev->bd_inode,
+ (loff_t)mddev->array_sectors << 9);
mutex_unlock(&bdev->bd_inode->i_mutex);
bdput(bdev);
}
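update_size() now works in 512-byte sectors while the ioctl structures still carry sizes in KiB, so callers convert with a factor of two, as update_array_info() does in a later hunk. A minimal sketch of that conversion, using a hypothetical helper name that is not part of the patch:

	/* Illustrative only: mdu_array_info_t.size is in KiB,
	 * update_size() takes 512-byte sectors, hence the "* 2".
	 */
	static inline sector_t kib_to_sectors(unsigned int size_kib)
	{
		return (sector_t)size_kib * 2;
	}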
@@ -4588,7 +4716,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
return mddev->pers->reconfig(mddev, info->layout, -1);
}
if (info->size >= 0 && mddev->size != info->size)
- rv = update_size(mddev, info->size);
+ rv = update_size(mddev, (sector_t)info->size * 2);
if (mddev->raid_disks != info->raid_disks)
rv = update_raid_disks(mddev, info->raid_disks);
@@ -4641,6 +4769,12 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev)
return 0;
}
+/*
+ * We have a problem here : there is no easy way to give a CHS
+ * virtual geometry. We currently pretend that we have a 2 heads
+ * 4 sectors (with a BIG number of cylinders...). This drives
+ * dosfs just mad... ;-)
+ */
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
mddev_t *mddev = bdev->bd_disk->private_data;
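As a worked example of the fake geometry described in the comment above: with heads = 2 and sectors = 4, one cylinder spans 2 * 4 = 8 sectors, so a 1 TiB array (2147483648 sectors of 512 bytes) is reported as 2147483648 / 8 = 268435456 cylinders, which is the "BIG number of cylinders" the comment warns about.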
@@ -4651,7 +4785,7 @@ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static int md_ioctl(struct inode *inode, struct file *file,
+static int md_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
int err = 0;
@@ -4689,7 +4823,7 @@ static int md_ioctl(struct inode *inode, struct file *file,
* Commands creating/starting a new array:
*/
- mddev = inode->i_bdev->bd_disk->private_data;
+ mddev = bdev->bd_disk->private_data;
if (!mddev) {
BUG();
@@ -4785,19 +4919,13 @@ static int md_ioctl(struct inode *inode, struct file *file,
goto done_unlock;
case STOP_ARRAY:
- err = do_md_stop (mddev, 0);
+ err = do_md_stop(mddev, 0, 1);
goto done_unlock;
case STOP_ARRAY_RO:
- err = do_md_stop (mddev, 1);
+ err = do_md_stop(mddev, 1, 1);
goto done_unlock;
- /*
- * We have a problem here : there is no easy way to give a CHS
- * virtual geometry. We currently pretend that we have a 2 heads
- * 4 sectors (with a BIG number of cylinders...). This drives
- * dosfs just mad... ;-)
- */
}
/*
@@ -4807,13 +4935,12 @@ static int md_ioctl(struct inode *inode, struct file *file,
* here and hit the 'default' below, so only disallow
* 'md' ioctls, and switch to rw mode if started auto-readonly.
*/
- if (_IOC_TYPE(cmd) == MD_MAJOR &&
- mddev->ro && mddev->pers) {
+ if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
if (mddev->ro == 2) {
mddev->ro = 0;
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
-
+ sysfs_notify(&mddev->kobj, NULL, "array_state");
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
} else {
err = -EROFS;
goto abort_unlock;
@@ -4845,7 +4972,7 @@ static int md_ioctl(struct inode *inode, struct file *file,
goto done_unlock;
case RUN_ARRAY:
- err = do_md_run (mddev);
+ err = do_md_run(mddev);
goto done_unlock;
case SET_BITMAP_FILE:
@@ -4869,13 +4996,13 @@ abort:
return err;
}
-static int md_open(struct inode *inode, struct file *file)
+static int md_open(struct block_device *bdev, fmode_t mode)
{
/*
* Succeed if we can lock the mddev, which confirms that
* it isn't being stopped right now.
*/
- mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
+ mddev_t *mddev = bdev->bd_disk->private_data;
int err;
if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
@@ -4883,18 +5010,20 @@ static int md_open(struct inode *inode, struct file *file)
err = 0;
mddev_get(mddev);
+ atomic_inc(&mddev->openers);
mddev_unlock(mddev);
- check_disk_change(inode->i_bdev);
+ check_disk_change(bdev);
out:
return err;
}
-static int md_release(struct inode *inode, struct file * file)
+static int md_release(struct gendisk *disk, fmode_t mode)
{
- mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
+ mddev_t *mddev = disk->private_data;
BUG_ON(!mddev);
+ atomic_dec(&mddev->openers);
mddev_put(mddev);
return 0;
@@ -4919,7 +5048,7 @@ static struct block_device_operations md_fops =
.owner = THIS_MODULE,
.open = md_open,
.release = md_release,
- .ioctl = md_ioctl,
+ .locked_ioctl = md_ioctl,
.getgeo = md_getgeo,
.media_changed = md_media_changed,
.revalidate_disk= md_revalidate,
@@ -5029,6 +5158,9 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
if (!mddev->pers->error_handler)
return;
mddev->pers->error_handler(mddev,rdev);
+ if (mddev->degraded)
+ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ set_bit(StateChanged, &rdev->flags);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -5258,10 +5390,11 @@ static int md_seq_show(struct seq_file *seq, void *v)
if (!list_empty(&mddev->disks)) {
if (mddev->pers)
seq_printf(seq, "\n %llu blocks",
- (unsigned long long)mddev->array_size);
+ (unsigned long long)
+ mddev->array_sectors / 2);
else
seq_printf(seq, "\n %llu blocks",
- (unsigned long long)size);
+ (unsigned long long)size);
}
if (mddev->persistent) {
if (mddev->major_version != 0 ||
@@ -5277,11 +5410,11 @@ static int md_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, " super non-persistent");
if (mddev->pers) {
- mddev->pers->status (seq, mddev);
+ mddev->pers->status(seq, mddev);
seq_printf(seq, "\n ");
if (mddev->pers->sync_request) {
if (mddev->curr_resync > 2) {
- status_resync (seq, mddev);
+ status_resync(seq, mddev);
seq_printf(seq, "\n ");
} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
seq_printf(seq, "\tresync=DELAYED\n ");
@@ -5391,15 +5524,15 @@ int unregister_md_personality(struct mdk_personality *p)
static int is_mddev_idle(mddev_t *mddev)
{
mdk_rdev_t * rdev;
- struct list_head *tmp;
int idle;
long curr_events;
idle = 1;
- rdev_for_each(rdev, tmp, mddev) {
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
- curr_events = disk_stat_read(disk, sectors[0]) +
- disk_stat_read(disk, sectors[1]) -
+ curr_events = part_stat_read(&disk->part0, sectors[0]) +
+ part_stat_read(&disk->part0, sectors[1]) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and
@@ -5428,6 +5561,7 @@ static int is_mddev_idle(mddev_t *mddev)
idle = 0;
}
}
+ rcu_read_unlock();
return idle;
}
@@ -5451,6 +5585,7 @@ void md_done_sync(mddev_t *mddev, int blocks, int ok)
*/
void md_write_start(mddev_t *mddev, struct bio *bi)
{
+ int did_change = 0;
if (bio_data_dir(bi) != WRITE)
return;
@@ -5461,6 +5596,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
+ did_change = 1;
}
atomic_inc(&mddev->writes_pending);
if (mddev->safemode == 1)
@@ -5471,10 +5607,12 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
md_wakeup_thread(mddev->thread);
+ did_change = 1;
}
spin_unlock_irq(&mddev->write_lock);
- sysfs_notify(&mddev->kobj, NULL, "array_state");
}
+ if (did_change)
+ sysfs_notify(&mddev->kobj, NULL, "array_state");
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
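The did_change logic above records the state transition while the spinlock is held and defers the sysfs notification until after the lock is dropped, since sysfs_notify() may block. A minimal sketch of the pattern, with generic names that are illustrative only:

	int did_change = 0;

	spin_lock_irq(&mddev->write_lock);
	if (needs_transition) {		/* e.g. leaving in_sync */
		mark_transition();	/* cheap, non-sleeping work under the lock */
		did_change = 1;
	}
	spin_unlock_irq(&mddev->write_lock);
	if (did_change)
		sysfs_notify(&mddev->kobj, NULL, "array_state");	/* may sleep */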
@@ -5495,13 +5633,18 @@ void md_write_end(mddev_t *mddev)
* may proceed without blocking. It is important to call this before
* attempting a GFP_KERNEL allocation while holding the mddev lock.
* Must be called with mddev_lock held.
+ *
+ * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
+ * is dropped, so return -EAGAIN after notifying userspace.
*/
-void md_allow_write(mddev_t *mddev)
+int md_allow_write(mddev_t *mddev)
{
if (!mddev->pers)
- return;
+ return 0;
if (mddev->ro)
- return;
+ return 0;
+ if (!mddev->pers->sync_request)
+ return 0;
spin_lock_irq(&mddev->write_lock);
if (mddev->in_sync) {
@@ -5512,14 +5655,14 @@ void md_allow_write(mddev_t *mddev)
mddev->safemode = 1;
spin_unlock_irq(&mddev->write_lock);
md_update_sb(mddev, 0);
-
sysfs_notify(&mddev->kobj, NULL, "array_state");
- /* wait for the dirty state to be recorded in the metadata */
- wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
} else
spin_unlock_irq(&mddev->write_lock);
+
+ if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ return -EAGAIN;
+ else
+ return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
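Callers that need GFP_KERNEL allocations while holding the mddev lock now check the return value; the raid1 and raid5 hunks later in this diff follow the same pattern. A minimal caller sketch:

	err = md_allow_write(mddev);
	if (err)	/* -EAGAIN: external metadata, userspace was notified */
		return err;
	/* safe to perform GFP_KERNEL allocations while holding the mddev lock */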
@@ -5600,7 +5743,11 @@ void md_do_sync(mddev_t *mddev)
* time 'round when curr_resync == 2
*/
continue;
- prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
+ /* We need to wait 'interruptible' so as not to
+ * contribute to the load average, and not to
+ * be caught by 'softlockup'
+ */
+ prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
if (!kthread_should_stop() &&
mddev2->curr_resync >= mddev->curr_resync) {
printk(KERN_INFO "md: delaying %s of %s"
@@ -5608,6 +5755,8 @@ void md_do_sync(mddev_t *mddev)
" share one or more physical units)\n",
desc, mdname(mddev), mdname(mddev2));
mddev_put(mddev2);
+ if (signal_pending(current))
+ flush_signals(current);
schedule();
finish_wait(&resync_wait, &wq);
goto try_again;
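Waiting TASK_INTERRUPTIBLE keeps the resync thread out of the load average and away from the softlockup watchdog, but a pending signal would then make schedule() return without sleeping, hence the flush_signals() added above. The general shape of the wait, with illustrative names:

	prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
	if (!condition_met) {
		if (signal_pending(current))
			flush_signals(current);	/* otherwise schedule() would not sleep */
		schedule();
	}
	finish_wait(&resync_wait, &wq);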
@@ -5625,9 +5774,11 @@ void md_do_sync(mddev_t *mddev)
max_sectors = mddev->resync_max_sectors;
mddev->resync_mismatches = 0;
/* we don't use the checkpoint if there's a bitmap */
- if (!mddev->bitmap &&
- !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ j = mddev->resync_min;
+ else if (!mddev->bitmap)
j = mddev->recovery_cp;
+
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->size << 1;
else {
@@ -5796,6 +5947,7 @@ void md_do_sync(mddev_t *mddev)
skip:
mddev->curr_resync = 0;
+ mddev->resync_min = 0;
mddev->resync_max = MaxSector;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
wake_up(&resync_wait);
@@ -5837,15 +5989,17 @@ static int remove_and_add_spares(mddev_t *mddev)
}
}
- if (mddev->degraded) {
+	if (mddev->degraded && !mddev->ro) {
rdev_for_each(rdev, rtmp, mddev) {
if (rdev->raid_disk >= 0 &&
- !test_bit(In_sync, &rdev->flags))
+ !test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Blocked, &rdev->flags))
spares++;
if (rdev->raid_disk < 0
&& !test_bit(Faulty, &rdev->flags)) {
rdev->recovery_offset = 0;
- if (mddev->pers->hot_add_disk(mddev,rdev)) {
+ if (mddev->pers->
+ hot_add_disk(mddev, rdev) == 0) {
char nm[20];
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&mddev->kobj,
@@ -5894,6 +6048,9 @@ void md_check_recovery(mddev_t *mddev)
if (mddev->bitmap)
bitmap_daemon_work(mddev->bitmap);
+ if (test_and_clear_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags))
+ sysfs_notify(&mddev->kobj, NULL, "array_state");
+
if (mddev->ro)
return;
@@ -5906,6 +6063,8 @@ void md_check_recovery(mddev_t *mddev)
flush_signals(current);
}
+ if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ return;
if ( ! (
(mddev->flags && !mddev->external) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
@@ -5919,24 +6078,41 @@ void md_check_recovery(mddev_t *mddev)
if (mddev_trylock(mddev)) {
int spares = 0;
+ if (mddev->ro) {
+ /* Only thing we do on a ro array is remove
+ * failed devices.
+ */
+ remove_and_add_spares(mddev);
+ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ goto unlock;
+ }
+
if (!mddev->external) {
+ int did_change = 0;
spin_lock_irq(&mddev->write_lock);
if (mddev->safemode &&
!atomic_read(&mddev->writes_pending) &&
!mddev->in_sync &&
mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
+ did_change = 1;
if (mddev->persistent)
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
spin_unlock_irq(&mddev->write_lock);
+ if (did_change)
+ sysfs_notify(&mddev->kobj, NULL, "array_state");
}
if (mddev->flags)
md_update_sb(mddev, 0);
+ rdev_for_each(rdev, rtmp, mddev)
+ if (test_and_clear_bit(StateChanged, &rdev->flags))
+ sysfs_notify(&rdev->kobj, NULL, "state");
+
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
@@ -5948,10 +6124,13 @@ void md_check_recovery(mddev_t *mddev)
/* resync has finished, collect result */
md_unregister_thread(mddev->sync_thread);
mddev->sync_thread = NULL;
- if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
/* activate any spares */
- mddev->pers->spare_active(mddev);
+ if (mddev->pers->spare_active(mddev))
+ sysfs_notify(&mddev->kobj, NULL,
+ "degraded");
}
md_update_sb(mddev, 1);
@@ -5965,13 +6144,18 @@ void md_check_recovery(mddev_t *mddev)
mddev->recovery = 0;
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ sysfs_notify(&mddev->kobj, NULL, "sync_action");
md_new_event(mddev);
goto unlock;
}
+ /* Set RUNNING before clearing NEEDED to avoid
+ * any transients in the value of "sync_action".
+ */
+ set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
/* Clear some bits that don't mean anything, but
* might be left set
*/
- clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
@@ -5989,17 +6173,20 @@ void md_check_recovery(mddev_t *mddev)
/* Cannot proceed */
goto unlock;
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+ clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if ((spares = remove_and_add_spares(mddev))) {
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (mddev->recovery_cp < MaxSector) {
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
/* nothing to be done ... */
goto unlock;
if (mddev->pers->sync_request) {
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
if (spares && mddev->bitmap && ! mddev->bitmap->file) {
/* We are adding a device or devices to an array
* which has the bitmap stored on all devices.
@@ -6018,9 +6205,16 @@ void md_check_recovery(mddev_t *mddev)
mddev->recovery = 0;
} else
md_wakeup_thread(mddev->sync_thread);
+ sysfs_notify(&mddev->kobj, NULL, "sync_action");
md_new_event(mddev);
}
unlock:
+ if (!mddev->sync_thread) {
+ clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ if (test_and_clear_bit(MD_RECOVERY_RECOVER,
+ &mddev->recovery))
+ sysfs_notify(&mddev->kobj, NULL, "sync_action");
+ }
mddev_unlock(mddev);
}
}
@@ -6047,7 +6241,11 @@ static int md_notify_reboot(struct notifier_block *this,
for_each_mddev(mddev, tmp)
if (mddev_trylock(mddev)) {
- do_md_stop (mddev, 1);
+			/* Force a switch to readonly even if the array
+			 * appears to still be in use. Hence
+			 * the '100'.
+ */
+ do_md_stop(mddev, 1, 100);
mddev_unlock(mddev);
}
/*
@@ -6091,7 +6289,7 @@ static int __init md_init(void)
raid_table_header = register_sysctl_table(raid_root_table);
md_geninit();
- return (0);
+ return 0;
}
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index e968116e0de9..d4ac47d11279 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -19,16 +19,7 @@
* Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <linux/raid/multipath.h>
-#include <linux/buffer_head.h>
-#include <asm/atomic.h>
-
-#define MAJOR_NR MD_MAJOR
-#define MD_DRIVER
-#define MD_PERSONALITY
#define MAX_WORK_PER_DISK 128
@@ -147,6 +138,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
const int rw = bio_data_dir(bio);
+ int cpu;
if (unlikely(bio_barrier(bio))) {
bio_endio(bio, -EOPNOTSUPP);
@@ -158,8 +150,11 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
mp_bh->master_bio = bio;
mp_bh->mddev = mddev;
- disk_stat_inc(mddev->gendisk, ios[rw]);
- disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
mp_bh->path = multipath_map(conf);
if (mp_bh->path < 0) {
@@ -172,7 +167,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
mp_bh->bio = *bio;
mp_bh->bio.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
- mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST);
+ mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
generic_make_request(&mp_bh->bio);
@@ -281,13 +276,18 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
multipath_conf_t *conf = mddev->private;
struct request_queue *q;
- int found = 0;
+ int err = -EEXIST;
int path;
struct multipath_info *p;
+ int first = 0;
+ int last = mddev->raid_disks - 1;
+
+ if (rdev->raid_disk >= 0)
+ first = last = rdev->raid_disk;
print_multipath_conf(conf);
- for (path=0; path<mddev->raid_disks; path++)
+ for (path = first; path <= last; path++)
if ((p=conf->multipaths+path)->rdev == NULL) {
q = rdev->bdev->bd_disk->queue;
blk_queue_stack_limits(mddev->queue, q);
@@ -307,11 +307,13 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
rdev->raid_disk = path;
set_bit(In_sync, &rdev->flags);
rcu_assign_pointer(p->rdev, rdev);
- found = 1;
+ err = 0;
+ break;
}
print_multipath_conf(conf);
- return found;
+
+ return err;
}
static int multipath_remove_disk(mddev_t *mddev, int number)
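The personalities' ->add_disk methods now return 0 on success or a negative errno (-EEXIST when every candidate slot is taken, and for raid10 -EBUSY or -EINVAL when the array is not ready), instead of a 1/0 "found" flag. An illustrative caller sketch matching the remove_and_add_spares() change earlier in this diff:

	if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
		/* success: create the "rd%d" sysfs link, kick off recovery */
	} else {
		/* negative errno: no free slot, or array not ready for hot-add */
	}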
@@ -391,7 +393,7 @@ static void multipathd (mddev_t *mddev)
*bio = *(mp_bh->master_bio);
bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
- bio->bi_rw |= (1 << BIO_RW_FAILFAST);
+ bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
bio->bi_end_io = multipath_end_request;
bio->bi_private = mp_bh;
generic_make_request(bio);
@@ -497,7 +499,7 @@ static int multipath_run (mddev_t *mddev)
/*
* Ok, everything is just fine now
*/
- mddev->array_size = mddev->size;
+ mddev->array_sectors = mddev->size * 2;
mddev->queue->unplug_fn = multipath_unplug;
mddev->queue->backing_dev_info.congested_fn = multipath_congested;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index bcbb82594a19..8ac6488ad0dc 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -18,13 +18,8 @@
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/module.h>
#include <linux/raid/raid0.h>
-#define MAJOR_NR MD_MAJOR
-#define MD_DRIVER
-#define MD_PERSONALITY
-
static void raid0_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
@@ -295,16 +290,16 @@ static int raid0_run (mddev_t *mddev)
goto out_free_conf;
/* calculate array device size */
- mddev->array_size = 0;
+ mddev->array_sectors = 0;
rdev_for_each(rdev, tmp, mddev)
- mddev->array_size += rdev->size;
+ mddev->array_sectors += rdev->size * 2;
printk("raid0 : md_size is %llu blocks.\n",
- (unsigned long long)mddev->array_size);
+ (unsigned long long)mddev->array_sectors / 2);
printk("raid0 : conf->hash_spacing is %llu blocks.\n",
(unsigned long long)conf->hash_spacing);
{
- sector_t s = mddev->array_size;
+ sector_t s = mddev->array_sectors / 2;
sector_t space = conf->hash_spacing;
int round;
conf->preshift = 0;
@@ -399,14 +394,18 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
sector_t chunk;
sector_t block, rsect;
const int rw = bio_data_dir(bio);
+ int cpu;
if (unlikely(bio_barrier(bio))) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
- disk_stat_inc(mddev->gendisk, ios[rw]);
- disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
chunk_size = mddev->chunk_size >> 10;
chunk_sects = mddev->chunk_size >> 9;
@@ -423,7 +422,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
/* This is a one page bio that upper layers
* refuse to split for us, so we need to split it.
*/
- bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
+ bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
if (raid0_make_request(q, &bp->bio1))
generic_make_request(&bp->bio1);
if (raid0_make_request(q, &bp->bio2))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c610b947218a..9c788e2489b1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -32,6 +32,7 @@
*/
#include "dm-bio-list.h"
+#include <linux/delay.h>
#include <linux/raid/raid1.h>
#include <linux/raid/bitmap.h>
@@ -779,7 +780,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
struct page **behind_pages = NULL;
const int rw = bio_data_dir(bio);
const int do_sync = bio_sync(bio);
- int do_barriers;
+ int cpu, do_barriers;
mdk_rdev_t *blocked_rdev;
/*
@@ -804,8 +805,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
bitmap = mddev->bitmap;
- disk_stat_inc(mddev->gendisk, ios[rw]);
- disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
/*
* make_request() can abort the operation when READA is being
@@ -1100,11 +1104,16 @@ static int raid1_spare_active(mddev_t *mddev)
static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
conf_t *conf = mddev->private;
- int found = 0;
+ int err = -EEXIST;
int mirror = 0;
mirror_info_t *p;
+ int first = 0;
+ int last = mddev->raid_disks - 1;
- for (mirror=0; mirror < mddev->raid_disks; mirror++)
+ if (rdev->raid_disk >= 0)
+ first = last = rdev->raid_disk;
+
+ for (mirror = first; mirror <= last; mirror++)
if ( !(p=conf->mirrors+mirror)->rdev) {
blk_queue_stack_limits(mddev->queue,
@@ -1119,7 +1128,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
p->head_position = 0;
rdev->raid_disk = mirror;
- found = 1;
+ err = 0;
/* As all devices are equivalent, we don't need a full recovery
* if this was recently any drive of the array
*/
@@ -1130,7 +1139,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
}
print_conf(conf);
- return found;
+ return err;
}
static int raid1_remove_disk(mddev_t *mddev, int number)
@@ -1297,9 +1306,6 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
sbio->bi_size = r1_bio->sectors << 9;
sbio->bi_idx = 0;
sbio->bi_phys_segments = 0;
- sbio->bi_hw_segments = 0;
- sbio->bi_hw_front_size = 0;
- sbio->bi_hw_back_size = 0;
sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
sbio->bi_flags |= 1 << BIO_UPTODATE;
sbio->bi_next = NULL;
@@ -1785,7 +1791,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
bio->bi_vcnt = 0;
bio->bi_idx = 0;
bio->bi_phys_segments = 0;
- bio->bi_hw_segments = 0;
bio->bi_size = 0;
bio->bi_end_io = NULL;
bio->bi_private = NULL;
@@ -2038,7 +2043,7 @@ static int run(mddev_t *mddev)
/*
* Ok, everything is just fine now
*/
- mddev->array_size = mddev->size;
+ mddev->array_sectors = mddev->size * 2;
mddev->queue->unplug_fn = raid1_unplug;
mddev->queue->backing_dev_info.congested_fn = raid1_congested;
@@ -2100,14 +2105,15 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors)
* any io in the removed space completes, but it hardly seems
* worth it.
*/
- mddev->array_size = sectors>>1;
- set_capacity(mddev->gendisk, mddev->array_size << 1);
+ mddev->array_sectors = sectors;
+ set_capacity(mddev->gendisk, mddev->array_sectors);
mddev->changed = 1;
- if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) {
+ if (mddev->array_sectors / 2 > mddev->size &&
+ mddev->recovery_cp == MaxSector) {
mddev->recovery_cp = mddev->size << 1;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
- mddev->size = mddev->array_size;
+ mddev->size = mddev->array_sectors / 2;
mddev->resync_max_sectors = sectors;
return 0;
}
@@ -2131,7 +2137,7 @@ static int raid1_reshape(mddev_t *mddev)
conf_t *conf = mddev_to_conf(mddev);
int cnt, raid_disks;
unsigned long flags;
- int d, d2;
+ int d, d2, err;
/* Cannot change chunk_size, layout, or level */
if (mddev->chunk_size != mddev->new_chunk ||
@@ -2143,7 +2149,9 @@ static int raid1_reshape(mddev_t *mddev)
return -EINVAL;
}
- md_allow_write(mddev);
+ err = md_allow_write(mddev);
+ if (err)
+ return err;
raid_disks = mddev->raid_disks + mddev->delta_disks;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 22bb2b1b886d..da5129a24b18 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -19,6 +19,7 @@
*/
#include "dm-bio-list.h"
+#include <linux/delay.h>
#include <linux/raid/raid10.h>
#include <linux/raid/bitmap.h>
@@ -76,11 +77,13 @@ static void r10bio_pool_free(void *r10_bio, void *data)
kfree(r10_bio);
}
+/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
-//#define RESYNC_BLOCK_SIZE PAGE_SIZE
-#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
-#define RESYNC_WINDOW (2048*1024)
+/* amount of memory to reserve for resync requests */
+#define RESYNC_WINDOW (1024*1024)
+/* maximum number of concurrent requests, memory permitting */
+#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
/*
* When performing a resync, we need to read and compare, so
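For reference, the arithmetic behind the new constants: with RESYNC_BLOCK_SIZE = 64 KiB, RESYNC_PAGES works out to 16 on 4 KiB pages, RESYNC_WINDOW reserves 1 MiB (room for 16 in-flight blocks' worth of buffers), and RESYNC_DEPTH = 32 MiB / 64 KiB = 512 concurrent requests, memory permitting.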
@@ -215,6 +218,9 @@ static void reschedule_retry(r10bio_t *r10_bio)
conf->nr_queued ++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+ /* wake up frozen array... */
+ wake_up(&conf->wait_barrier);
+
md_wakeup_thread(mddev->thread);
}
@@ -687,7 +693,6 @@ static int flush_pending_writes(conf_t *conf)
 * there is no normal IO happening. It must arrange to call
* lower_barrier when the particular background IO completes.
*/
-#define RESYNC_DEPTH 32
static void raise_barrier(conf_t *conf, int force)
{
@@ -785,6 +790,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
mirror_info_t *mirror;
r10bio_t *r10_bio;
struct bio *read_bio;
+ int cpu;
int i;
int chunk_sects = conf->chunk_mask + 1;
const int rw = bio_data_dir(bio);
@@ -812,7 +818,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
/* This is a one page bio that upper layers
* refuse to split for us, so we need to split it.
*/
- bp = bio_split(bio, bio_split_pool,
+ bp = bio_split(bio,
chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
if (make_request(q, &bp->bio1))
generic_make_request(&bp->bio1);
@@ -839,8 +845,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
*/
wait_barrier(conf);
- disk_stat_inc(mddev->gendisk, ios[rw]);
- disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
@@ -1114,24 +1123,30 @@ static int raid10_spare_active(mddev_t *mddev)
static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
conf_t *conf = mddev->private;
- int found = 0;
+ int err = -EEXIST;
int mirror;
mirror_info_t *p;
+ int first = 0;
+ int last = mddev->raid_disks - 1;
if (mddev->recovery_cp < MaxSector)
/* only hot-add to in-sync arrays, as recovery is
* very different from resync
*/
- return 0;
+ return -EBUSY;
if (!enough(conf))
- return 0;
+ return -EINVAL;
+
+	if (rdev->raid_disk >= 0)
+ first = last = rdev->raid_disk;
if (rdev->saved_raid_disk >= 0 &&
+ rdev->saved_raid_disk >= first &&
conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
mirror = rdev->saved_raid_disk;
else
- mirror = 0;
- for ( ; mirror < mddev->raid_disks; mirror++)
+ mirror = first;
+ for ( ; mirror <= last ; mirror++)
if ( !(p=conf->mirrors+mirror)->rdev) {
blk_queue_stack_limits(mddev->queue,
@@ -1146,7 +1161,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
p->head_position = 0;
rdev->raid_disk = mirror;
- found = 1;
+ err = 0;
if (rdev->saved_raid_disk != mirror)
conf->fullsync = 1;
rcu_assign_pointer(p->rdev, rdev);
@@ -1154,7 +1169,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
}
print_conf(conf);
- return found;
+ return err;
}
static int raid10_remove_disk(mddev_t *mddev, int number)
@@ -1335,9 +1350,6 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
tbio->bi_size = r10_bio->sectors << 9;
tbio->bi_idx = 0;
tbio->bi_phys_segments = 0;
- tbio->bi_hw_segments = 0;
- tbio->bi_hw_front_size = 0;
- tbio->bi_hw_back_size = 0;
tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
tbio->bi_flags |= 1 << BIO_UPTODATE;
tbio->bi_next = NULL;
@@ -1937,7 +1949,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
bio->bi_vcnt = 0;
bio->bi_idx = 0;
bio->bi_phys_segments = 0;
- bio->bi_hw_segments = 0;
bio->bi_size = 0;
}
@@ -2018,8 +2029,9 @@ static int run(mddev_t *mddev)
int nc, fc, fo;
sector_t stride, size;
- if (mddev->chunk_size == 0) {
- printk(KERN_ERR "md/raid10: non-zero chunk size required.\n");
+ if (mddev->chunk_size < PAGE_SIZE) {
+ printk(KERN_ERR "md/raid10: chunk size must be "
+ "at least PAGE_SIZE(%ld).\n", PAGE_SIZE);
return -EINVAL;
}
@@ -2159,7 +2171,7 @@ static int run(mddev_t *mddev)
/*
* Ok, everything is just fine now
*/
- mddev->array_size = size << (conf->chunk_shift-1);
+ mddev->array_sectors = size << conf->chunk_shift;
mddev->resync_max_sectors = size << conf->chunk_shift;
mddev->queue->unplug_fn = raid10_unplug;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9ce7154845c6..a36a7435edf5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -43,12 +43,7 @@
* miss any bits.
*/
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/highmem.h>
-#include <linux/bitops.h>
#include <linux/kthread.h>
-#include <asm/atomic.h>
#include "raid6.h"
#include <linux/raid/bitmap.h>
@@ -101,6 +96,40 @@
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
#endif
+/*
+ * We maintain a biased count of active stripes in the bottom 16 bits of
+ * bi_phys_segments, and a count of processed stripes in the upper 16 bits
+ */
+static inline int raid5_bi_phys_segments(struct bio *bio)
+{
+ return bio->bi_phys_segments & 0xffff;
+}
+
+static inline int raid5_bi_hw_segments(struct bio *bio)
+{
+ return (bio->bi_phys_segments >> 16) & 0xffff;
+}
+
+static inline int raid5_dec_bi_phys_segments(struct bio *bio)
+{
+ --bio->bi_phys_segments;
+ return raid5_bi_phys_segments(bio);
+}
+
+static inline int raid5_dec_bi_hw_segments(struct bio *bio)
+{
+ unsigned short val = raid5_bi_hw_segments(bio);
+
+ --val;
+ bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
+ return val;
+}
+
+static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
+{
+	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
+}
+
static inline int raid6_next_disk(int disk, int raid_disks)
{
disk++;
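A quick illustration of the bi_phys_segments encoding used by the helpers above (the values are invented for the example); note that the packing relies on bitwise OR so the low half is preserved:

	bio->bi_phys_segments = (2 << 16) | 3;	/* hw_segments = 2, phys_segments = 3 */

	raid5_bi_phys_segments(bio);	/* returns 3 (low 16 bits)       */
	raid5_bi_hw_segments(bio);	/* returns 2 (high 16 bits)      */
	raid5_dec_bi_hw_segments(bio);	/* returns 1, low half unchanged */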
@@ -115,15 +144,20 @@ static void return_io(struct bio *return_bi)
return_bi = bi->bi_next;
bi->bi_next = NULL;
bi->bi_size = 0;
- bi->bi_end_io(bi,
- test_bit(BIO_UPTODATE, &bi->bi_flags)
- ? 0 : -EIO);
+ bio_endio(bi, 0);
bi = return_bi;
}
}
static void print_raid5_conf (raid5_conf_t *conf);
+static int stripe_operations_active(struct stripe_head *sh)
+{
+ return sh->check_state || sh->reconstruct_state ||
+ test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
+ test_bit(STRIPE_COMPUTE_RUN, &sh->state);
+}
+
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
if (atomic_dec_and_test(&sh->count)) {
@@ -143,7 +177,7 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
}
md_wakeup_thread(conf->mddev->thread);
} else {
- BUG_ON(sh->ops.pending);
+ BUG_ON(stripe_operations_active(sh));
if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
atomic_dec(&conf->preread_active_stripes);
if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
@@ -236,7 +270,7 @@ static int grow_buffers(struct stripe_head *sh, int num)
return 0;
}
-static void raid5_build_block (struct stripe_head *sh, int i);
+static void raid5_build_block(struct stripe_head *sh, int i);
static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks)
{
@@ -245,7 +279,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
BUG_ON(atomic_read(&sh->count) != 0);
BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
- BUG_ON(sh->ops.pending || sh->ops.ack || sh->ops.complete);
+ BUG_ON(stripe_operations_active(sh));
CHECK_DEVLOCK();
pr_debug("init_stripe called, stripe %llu\n",
@@ -346,62 +380,18 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
return sh;
}
-/* test_and_ack_op() ensures that we only dequeue an operation once */
-#define test_and_ack_op(op, pend) \
-do { \
- if (test_bit(op, &sh->ops.pending) && \
- !test_bit(op, &sh->ops.complete)) { \
- if (test_and_set_bit(op, &sh->ops.ack)) \
- clear_bit(op, &pend); \
- else \
- ack++; \
- } else \
- clear_bit(op, &pend); \
-} while (0)
-
-/* find new work to run, do not resubmit work that is already
- * in flight
- */
-static unsigned long get_stripe_work(struct stripe_head *sh)
-{
- unsigned long pending;
- int ack = 0;
-
- pending = sh->ops.pending;
-
- test_and_ack_op(STRIPE_OP_BIOFILL, pending);
- test_and_ack_op(STRIPE_OP_COMPUTE_BLK, pending);
- test_and_ack_op(STRIPE_OP_PREXOR, pending);
- test_and_ack_op(STRIPE_OP_BIODRAIN, pending);
- test_and_ack_op(STRIPE_OP_POSTXOR, pending);
- test_and_ack_op(STRIPE_OP_CHECK, pending);
- if (test_and_clear_bit(STRIPE_OP_IO, &sh->ops.pending))
- ack++;
-
- sh->ops.count -= ack;
- if (unlikely(sh->ops.count < 0)) {
- printk(KERN_ERR "pending: %#lx ops.pending: %#lx ops.ack: %#lx "
- "ops.complete: %#lx\n", pending, sh->ops.pending,
- sh->ops.ack, sh->ops.complete);
- BUG();
- }
-
- return pending;
-}
-
static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);
-static void ops_run_io(struct stripe_head *sh)
+static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
raid5_conf_t *conf = sh->raid_conf;
int i, disks = sh->disks;
might_sleep();
- set_bit(STRIPE_IO_STARTED, &sh->state);
for (i = disks; i--; ) {
int rw;
struct bio *bi;
@@ -430,11 +420,11 @@ static void ops_run_io(struct stripe_head *sh)
rcu_read_unlock();
if (rdev) {
- if (test_bit(STRIPE_SYNCING, &sh->state) ||
- test_bit(STRIPE_EXPAND_SOURCE, &sh->state) ||
- test_bit(STRIPE_EXPAND_READY, &sh->state))
+ if (s->syncing || s->expanding || s->expanded)
md_sync_acct(rdev->bdev, STRIPE_SECTORS);
+ set_bit(STRIPE_IO_STARTED, &sh->state);
+
bi->bi_bdev = rdev->bdev;
pr_debug("%s: for %llu schedule op %ld on disc %d\n",
__func__, (unsigned long long)sh->sector,
@@ -528,38 +518,34 @@ static void ops_complete_biofill(void *stripe_head_ref)
(unsigned long long)sh->sector);
/* clear completed biofills */
+ spin_lock_irq(&conf->device_lock);
for (i = sh->disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
/* acknowledge completion of a biofill operation */
/* and check if we need to reply to a read request,
* new R5_Wantfill requests are held off until
- * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
+ * !STRIPE_BIOFILL_RUN
*/
if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
struct bio *rbi, *rbi2;
- /* The access to dev->read is outside of the
- * spin_lock_irq(&conf->device_lock), but is protected
- * by the STRIPE_OP_BIOFILL pending bit
- */
BUG_ON(!dev->read);
rbi = dev->read;
dev->read = NULL;
while (rbi && rbi->bi_sector <
dev->sector + STRIPE_SECTORS) {
rbi2 = r5_next_bio(rbi, dev->sector);
- spin_lock_irq(&conf->device_lock);
- if (--rbi->bi_phys_segments == 0) {
+ if (!raid5_dec_bi_phys_segments(rbi)) {
rbi->bi_next = return_bi;
return_bi = rbi;
}
- spin_unlock_irq(&conf->device_lock);
rbi = rbi2;
}
}
}
- set_bit(STRIPE_OP_BIOFILL, &sh->ops.complete);
+ spin_unlock_irq(&conf->device_lock);
+ clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
return_io(return_bi);
@@ -610,13 +596,14 @@ static void ops_complete_compute5(void *stripe_head_ref)
set_bit(R5_UPTODATE, &tgt->flags);
BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
clear_bit(R5_Wantcompute, &tgt->flags);
- set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
+ clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
+ if (sh->check_state == check_state_compute_run)
+ sh->check_state = check_state_compute_result;
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
}
-static struct dma_async_tx_descriptor *
-ops_run_compute5(struct stripe_head *sh, unsigned long pending)
+static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
{
/* kernel stack size limits the total number of disks */
int disks = sh->disks;
@@ -646,10 +633,6 @@ ops_run_compute5(struct stripe_head *sh, unsigned long pending)
ASYNC_TX_XOR_ZERO_DST, NULL,
ops_complete_compute5, sh);
- /* ack now if postxor is not set to be run */
- if (tx && !test_bit(STRIPE_OP_POSTXOR, &pending))
- async_tx_ack(tx);
-
return tx;
}
@@ -659,8 +642,6 @@ static void ops_complete_prexor(void *stripe_head_ref)
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
-
- set_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
}
static struct dma_async_tx_descriptor *
@@ -680,7 +661,7 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
/* Only process blocks that are known to be uptodate */
- if (dev->towrite && test_bit(R5_Wantprexor, &dev->flags))
+ if (test_bit(R5_Wantdrain, &dev->flags))
xor_srcs[count++] = dev->page;
}
@@ -692,16 +673,10 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
}
static struct dma_async_tx_descriptor *
-ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
- unsigned long pending)
+ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
int disks = sh->disks;
- int pd_idx = sh->pd_idx, i;
-
- /* check if prexor is active which means only process blocks
- * that are part of a read-modify-write (Wantprexor)
- */
- int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
+ int i;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
@@ -709,20 +684,8 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
struct bio *chosen;
- int towrite;
-
- towrite = 0;
- if (prexor) { /* rmw */
- if (dev->towrite &&
- test_bit(R5_Wantprexor, &dev->flags))
- towrite = 1;
- } else { /* rcw */
- if (i != pd_idx && dev->towrite &&
- test_bit(R5_LOCKED, &dev->flags))
- towrite = 1;
- }
- if (towrite) {
+ if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
struct bio *wbi;
spin_lock(&sh->lock);
@@ -747,18 +710,6 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
static void ops_complete_postxor(void *stripe_head_ref)
{
struct stripe_head *sh = stripe_head_ref;
-
- pr_debug("%s: stripe %llu\n", __func__,
- (unsigned long long)sh->sector);
-
- set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
- set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
-}
-
-static void ops_complete_write(void *stripe_head_ref)
-{
- struct stripe_head *sh = stripe_head_ref;
int disks = sh->disks, i, pd_idx = sh->pd_idx;
pr_debug("%s: stripe %llu\n", __func__,
@@ -770,16 +721,21 @@ static void ops_complete_write(void *stripe_head_ref)
set_bit(R5_UPTODATE, &dev->flags);
}
- set_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete);
- set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
+ if (sh->reconstruct_state == reconstruct_state_drain_run)
+ sh->reconstruct_state = reconstruct_state_drain_result;
+ else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
+ sh->reconstruct_state = reconstruct_state_prexor_drain_result;
+ else {
+ BUG_ON(sh->reconstruct_state != reconstruct_state_run);
+ sh->reconstruct_state = reconstruct_state_result;
+ }
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
}
static void
-ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
- unsigned long pending)
+ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
/* kernel stack size limits the total number of disks */
int disks = sh->disks;
@@ -787,9 +743,8 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
int count = 0, pd_idx = sh->pd_idx, i;
struct page *xor_dest;
- int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
+ int prexor = 0;
unsigned long flags;
- dma_async_tx_callback callback;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
@@ -797,7 +752,8 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
/* check if prexor is active which means only process blocks
* that are part of a read-modify-write (written)
*/
- if (prexor) {
+ if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
+ prexor = 1;
xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -813,10 +769,6 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
}
}
- /* check whether this postxor is part of a write */
- callback = test_bit(STRIPE_OP_BIODRAIN, &pending) ?
- ops_complete_write : ops_complete_postxor;
-
/* 1/ if we prexor'd then the dest is reused as a source
* 2/ if we did not prexor then we are redoing the parity
* set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
@@ -830,25 +782,20 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
if (unlikely(count == 1)) {
flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
- flags, tx, callback, sh);
+ flags, tx, ops_complete_postxor, sh);
} else
tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
- flags, tx, callback, sh);
+ flags, tx, ops_complete_postxor, sh);
}
static void ops_complete_check(void *stripe_head_ref)
{
struct stripe_head *sh = stripe_head_ref;
- int pd_idx = sh->pd_idx;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
- if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) &&
- sh->ops.zero_sum_result == 0)
- set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
-
- set_bit(STRIPE_OP_CHECK, &sh->ops.complete);
+ sh->check_state = check_state_check_result;
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
}
@@ -875,46 +822,42 @@ static void ops_run_check(struct stripe_head *sh)
tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
&sh->ops.zero_sum_result, 0, NULL, NULL, NULL);
- if (tx)
- set_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);
- else
- clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);
-
atomic_inc(&sh->count);
tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
ops_complete_check, sh);
}
-static void raid5_run_ops(struct stripe_head *sh, unsigned long pending)
+static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
int overlap_clear = 0, i, disks = sh->disks;
struct dma_async_tx_descriptor *tx = NULL;
- if (test_bit(STRIPE_OP_BIOFILL, &pending)) {
+ if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
}
- if (test_bit(STRIPE_OP_COMPUTE_BLK, &pending))
- tx = ops_run_compute5(sh, pending);
+ if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
+ tx = ops_run_compute5(sh);
+ /* terminate the chain if postxor is not set to be run */
+ if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request))
+ async_tx_ack(tx);
+ }
- if (test_bit(STRIPE_OP_PREXOR, &pending))
+ if (test_bit(STRIPE_OP_PREXOR, &ops_request))
tx = ops_run_prexor(sh, tx);
- if (test_bit(STRIPE_OP_BIODRAIN, &pending)) {
- tx = ops_run_biodrain(sh, tx, pending);
+ if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
+ tx = ops_run_biodrain(sh, tx);
overlap_clear++;
}
- if (test_bit(STRIPE_OP_POSTXOR, &pending))
- ops_run_postxor(sh, tx, pending);
+ if (test_bit(STRIPE_OP_POSTXOR, &ops_request))
+ ops_run_postxor(sh, tx);
- if (test_bit(STRIPE_OP_CHECK, &pending))
+ if (test_bit(STRIPE_OP_CHECK, &ops_request))
ops_run_check(sh);
- if (test_bit(STRIPE_OP_IO, &pending))
- ops_run_io(sh);
-
if (overlap_clear)
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -997,14 +940,16 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
struct stripe_head *osh, *nsh;
LIST_HEAD(newstripes);
struct disk_info *ndisks;
- int err = 0;
+ int err;
struct kmem_cache *sc;
int i;
if (newsize <= conf->pool_size)
return 0; /* never bother to shrink */
- md_allow_write(conf->mddev);
+ err = md_allow_write(conf->mddev);
+ if (err)
+ return err;
/* Step 1 */
sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
@@ -1201,7 +1146,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
release_stripe(sh);
}
-static void raid5_end_write_request (struct bio *bi, int error)
+static void raid5_end_write_request(struct bio *bi, int error)
{
struct stripe_head *sh = bi->bi_private;
raid5_conf_t *conf = sh->raid_conf;
@@ -1233,7 +1178,7 @@ static void raid5_end_write_request (struct bio *bi, int error)
static sector_t compute_blocknr(struct stripe_head *sh, int i);
-static void raid5_build_block (struct stripe_head *sh, int i)
+static void raid5_build_block(struct stripe_head *sh, int i)
{
struct r5dev *dev = &sh->dev[i];
@@ -1271,10 +1216,10 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
}
set_bit(Faulty, &rdev->flags);
- printk (KERN_ALERT
- "raid5: Disk failure on %s, disabling device.\n"
- "raid5: Operation continuing on %d devices.\n",
- bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
+ printk(KERN_ALERT
+ "raid5: Disk failure on %s, disabling device.\n"
+ "raid5: Operation continuing on %d devices.\n",
+ bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
}
}
@@ -1370,8 +1315,8 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
*dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
break;
default:
- printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
- conf->algorithm);
+ printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
+ conf->algorithm);
}
break;
}
@@ -1446,8 +1391,8 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
}
break;
default:
- printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
- conf->algorithm);
+ printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
+ conf->algorithm);
}
break;
}
@@ -1455,7 +1400,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
chunk_number = stripe * data_disks + i;
r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
- check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
+ check = raid5_compute_sector(r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
printk(KERN_ERR "compute_blocknr: map not correct\n");
return 0;
@@ -1703,11 +1648,11 @@ static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
}
}
-static int
-handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
+static void
+schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
+ int rcw, int expand)
{
int i, pd_idx = sh->pd_idx, disks = sh->disks;
- int locked = 0;
if (rcw) {
/* if we are not expanding this is a proper write request, and
@@ -1715,53 +1660,48 @@ handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
* stripe cache
*/
if (!expand) {
- set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
- sh->ops.count++;
- }
+ sh->reconstruct_state = reconstruct_state_drain_run;
+ set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+ } else
+ sh->reconstruct_state = reconstruct_state_run;
- set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
- sh->ops.count++;
+ set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (dev->towrite) {
set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantdrain, &dev->flags);
if (!expand)
clear_bit(R5_UPTODATE, &dev->flags);
- locked++;
+ s->locked++;
}
}
- if (locked + 1 == disks)
+ if (s->locked + 1 == disks)
if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
atomic_inc(&sh->raid_conf->pending_full_writes);
} else {
BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
- set_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
- set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
- set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
-
- sh->ops.count += 3;
+ sh->reconstruct_state = reconstruct_state_prexor_drain_run;
+ set_bit(STRIPE_OP_PREXOR, &s->ops_request);
+ set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+ set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (i == pd_idx)
continue;
- /* For a read-modify write there may be blocks that are
- * locked for reading while others are ready to be
- * written so we distinguish these blocks by the
- * R5_Wantprexor bit
- */
if (dev->towrite &&
(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_Wantcompute, &dev->flags))) {
- set_bit(R5_Wantprexor, &dev->flags);
+ test_bit(R5_Wantcompute, &dev->flags))) {
+ set_bit(R5_Wantdrain, &dev->flags);
set_bit(R5_LOCKED, &dev->flags);
clear_bit(R5_UPTODATE, &dev->flags);
- locked++;
+ s->locked++;
}
}
}
@@ -1771,13 +1711,11 @@ handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
*/
set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
- locked++;
+ s->locked++;
- pr_debug("%s: stripe %llu locked: %d pending: %lx\n",
+ pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
__func__, (unsigned long long)sh->sector,
- locked, sh->ops.pending);
-
- return locked;
+ s->locked, s->ops_request);
}
/*
@@ -1816,7 +1754,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
if (*bip)
bi->bi_next = *bip;
*bip = bi;
- bi->bi_phys_segments ++;
+ bi->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
spin_unlock(&sh->lock);
@@ -1876,7 +1814,7 @@ static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
}
static void
-handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
+handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
struct stripe_head_state *s, int disks,
struct bio **return_bi)
{
@@ -1910,7 +1848,7 @@ handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (--bi->bi_phys_segments == 0) {
+ if (!raid5_dec_bi_phys_segments(bi)) {
md_write_end(conf->mddev);
bi->bi_next = *return_bi;
*return_bi = bi;
@@ -1925,7 +1863,7 @@ handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (--bi->bi_phys_segments == 0) {
+ if (!raid5_dec_bi_phys_segments(bi)) {
md_write_end(conf->mddev);
bi->bi_next = *return_bi;
*return_bi = bi;
@@ -1949,7 +1887,7 @@ handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
struct bio *nextbi =
r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (--bi->bi_phys_segments == 0) {
+ if (!raid5_dec_bi_phys_segments(bi)) {
bi->bi_next = *return_bi;
*return_bi = bi;
}
@@ -1967,48 +1905,38 @@ handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
md_wakeup_thread(conf->mddev->thread);
}
-/* __handle_issuing_new_read_requests5 - returns 0 if there are no more disks
- * to process
+/* fetch_block5 - checks the given member device to see if its data needs
+ * to be read or computed to satisfy a request.
+ *
+ * Returns 1 when no more member devices need to be checked, otherwise returns
+ * 0 to tell the loop in handle_stripe_fill5 to continue
*/
-static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
- struct stripe_head_state *s, int disk_idx, int disks)
+static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
+ int disk_idx, int disks)
{
struct r5dev *dev = &sh->dev[disk_idx];
struct r5dev *failed_dev = &sh->dev[s->failed_num];
- /* don't schedule compute operations or reads on the parity block while
- * a check is in flight
- */
- if ((disk_idx == sh->pd_idx) &&
- test_bit(STRIPE_OP_CHECK, &sh->ops.pending))
- return ~0;
-
/* is the data in this block needed, and can we get it? */
if (!test_bit(R5_LOCKED, &dev->flags) &&
- !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread ||
- (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
- s->syncing || s->expanding || (s->failed &&
- (failed_dev->toread || (failed_dev->towrite &&
- !test_bit(R5_OVERWRITE, &failed_dev->flags)
- ))))) {
- /* 1/ We would like to get this block, possibly by computing it,
- * but we might not be able to.
- *
- * 2/ Since parity check operations potentially make the parity
- * block !uptodate it will need to be refreshed before any
- * compute operations on data disks are scheduled.
- *
- * 3/ We hold off parity block re-reads until check operations
- * have quiesced.
+ !test_bit(R5_UPTODATE, &dev->flags) &&
+ (dev->toread ||
+ (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
+ s->syncing || s->expanding ||
+ (s->failed &&
+ (failed_dev->toread ||
+ (failed_dev->towrite &&
+ !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
+ /* We would like to get this block, possibly by computing it,
+ * otherwise read it if the backing disk is insync
*/
if ((s->uptodate == disks - 1) &&
- (s->failed && disk_idx == s->failed_num) &&
- !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
- set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
+ (s->failed && disk_idx == s->failed_num)) {
+ set_bit(STRIPE_COMPUTE_RUN, &sh->state);
+ set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
set_bit(R5_Wantcompute, &dev->flags);
sh->ops.target = disk_idx;
s->req_compute = 1;
- sh->ops.count++;
/* Careful: from this point on 'uptodate' is in the eye
* of raid5_run_ops which services 'compute' operations
* before writes. R5_Wantcompute flags a block that will
@@ -2016,53 +1944,40 @@ static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
* subsequent operation.
*/
s->uptodate++;
- return 0; /* uptodate + compute == disks */
+ return 1; /* uptodate + compute == disks */
} else if (test_bit(R5_Insync, &dev->flags)) {
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
- if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
- sh->ops.count++;
s->locked++;
pr_debug("Reading block %d (sync=%d)\n", disk_idx,
s->syncing);
}
}
- return ~0;
+ return 0;
}
-static void handle_issuing_new_read_requests5(struct stripe_head *sh,
+/**
+ * handle_stripe_fill5 - read or compute data to satisfy pending requests.
+ */
+static void handle_stripe_fill5(struct stripe_head *sh,
struct stripe_head_state *s, int disks)
{
int i;
- /* Clear completed compute operations. Parity recovery
- * (STRIPE_OP_MOD_REPAIR_PD) implies a write-back which is handled
- * later on in this routine
- */
- if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
- !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
- clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
- clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
- clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
- }
-
/* look for blocks to read/compute, skip this if a compute
* is already in flight, or if the stripe contents are in the
* midst of changing due to a write
*/
- if (!test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) &&
- !test_bit(STRIPE_OP_PREXOR, &sh->ops.pending) &&
- !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
+ if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
+ !sh->reconstruct_state)
for (i = disks; i--; )
- if (__handle_issuing_new_read_requests5(
- sh, s, i, disks) == 0)
+ if (fetch_block5(sh, s, i, disks))
break;
- }
set_bit(STRIPE_HANDLE, &sh->state);
}
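
For reference, a minimal sketch (an illustration only, not code from this patch) of how the per-call request bits set by fetch_block5() are consumed: the stripe_head_state is zeroed for each analysis pass, handle_stripe_fill5() may set STRIPE_OP_COMPUTE_BLK in s->ops_request, and handle_stripe5() later hands that word to raid5_run_ops() and issues any R5_Wantread reads via ops_run_io():

	struct stripe_head_state s;

	memset(&s, 0, sizeof(s));
	handle_stripe_fill5(sh, &s, sh->disks);	/* may request a compute */
	if (s.ops_request)
		raid5_run_ops(sh, s.ops_request);	/* async compute/xor ops */
	ops_run_io(sh, &s);			/* issues reads flagged R5_Wantread */
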
-static void handle_issuing_new_read_requests6(struct stripe_head *sh,
+static void handle_stripe_fill6(struct stripe_head *sh,
struct stripe_head_state *s, struct r6_state *r6s,
int disks)
{
@@ -2121,12 +2036,12 @@ static void handle_issuing_new_read_requests6(struct stripe_head *sh,
}
-/* handle_completed_write_requests
+/* handle_stripe_clean_event
* any written block on an uptodate or failed drive can be returned.
* Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
* never LOCKED, so we don't need to test 'failed' directly.
*/
-static void handle_completed_write_requests(raid5_conf_t *conf,
+static void handle_stripe_clean_event(raid5_conf_t *conf,
struct stripe_head *sh, int disks, struct bio **return_bi)
{
int i;
@@ -2147,7 +2062,7 @@ static void handle_completed_write_requests(raid5_conf_t *conf,
while (wbi && wbi->bi_sector <
dev->sector + STRIPE_SECTORS) {
wbi2 = r5_next_bio(wbi, dev->sector);
- if (--wbi->bi_phys_segments == 0) {
+ if (!raid5_dec_bi_phys_segments(wbi)) {
md_write_end(conf->mddev);
wbi->bi_next = *return_bi;
*return_bi = wbi;
@@ -2171,7 +2086,7 @@ static void handle_completed_write_requests(raid5_conf_t *conf,
md_wakeup_thread(conf->mddev->thread);
}
-static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
+static void handle_stripe_dirtying5(raid5_conf_t *conf,
struct stripe_head *sh, struct stripe_head_state *s, int disks)
{
int rmw = 0, rcw = 0, i;
@@ -2215,9 +2130,6 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
"%d for r-m-w\n", i);
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
- if (!test_and_set_bit(
- STRIPE_OP_IO, &sh->ops.pending))
- sh->ops.count++;
s->locked++;
} else {
set_bit(STRIPE_DELAYED, &sh->state);
@@ -2241,9 +2153,6 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
"%d for Reconstruct\n", i);
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
- if (!test_and_set_bit(
- STRIPE_OP_IO, &sh->ops.pending))
- sh->ops.count++;
s->locked++;
} else {
set_bit(STRIPE_DELAYED, &sh->state);
@@ -2261,14 +2170,13 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
* simultaneously. If this is not the case then new writes need to be
* held off until the compute completes.
*/
- if ((s->req_compute ||
- !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) &&
- (s->locked == 0 && (rcw == 0 || rmw == 0) &&
- !test_bit(STRIPE_BIT_DELAY, &sh->state)))
- s->locked += handle_write_operations5(sh, rcw == 0, 0);
+ if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
+ (s->locked == 0 && (rcw == 0 || rmw == 0) &&
+ !test_bit(STRIPE_BIT_DELAY, &sh->state)))
+ schedule_reconstruction5(sh, s, rcw == 0, 0);
}
-static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
+static void handle_stripe_dirtying6(raid5_conf_t *conf,
struct stripe_head *sh, struct stripe_head_state *s,
struct r6_state *r6s, int disks)
{
@@ -2371,92 +2279,86 @@ static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
struct stripe_head_state *s, int disks)
{
- int canceled_check = 0;
+ struct r5dev *dev = NULL;
set_bit(STRIPE_HANDLE, &sh->state);
- /* complete a check operation */
- if (test_and_clear_bit(STRIPE_OP_CHECK, &sh->ops.complete)) {
- clear_bit(STRIPE_OP_CHECK, &sh->ops.ack);
- clear_bit(STRIPE_OP_CHECK, &sh->ops.pending);
+ switch (sh->check_state) {
+ case check_state_idle:
+ /* start a new check operation if there are no failures */
if (s->failed == 0) {
- if (sh->ops.zero_sum_result == 0)
- /* parity is correct (on disc,
- * not in buffer any more)
- */
- set_bit(STRIPE_INSYNC, &sh->state);
- else {
- conf->mddev->resync_mismatches +=
- STRIPE_SECTORS;
- if (test_bit(
- MD_RECOVERY_CHECK, &conf->mddev->recovery))
- /* don't try to repair!! */
- set_bit(STRIPE_INSYNC, &sh->state);
- else {
- set_bit(STRIPE_OP_COMPUTE_BLK,
- &sh->ops.pending);
- set_bit(STRIPE_OP_MOD_REPAIR_PD,
- &sh->ops.pending);
- set_bit(R5_Wantcompute,
- &sh->dev[sh->pd_idx].flags);
- sh->ops.target = sh->pd_idx;
- sh->ops.count++;
- s->uptodate++;
- }
- }
- } else
- canceled_check = 1; /* STRIPE_INSYNC is not set */
- }
-
- /* start a new check operation if there are no failures, the stripe is
- * not insync, and a repair is not in flight
- */
- if (s->failed == 0 &&
- !test_bit(STRIPE_INSYNC, &sh->state) &&
- !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
- if (!test_and_set_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
BUG_ON(s->uptodate != disks);
+ sh->check_state = check_state_run;
+ set_bit(STRIPE_OP_CHECK, &s->ops_request);
clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
- sh->ops.count++;
s->uptodate--;
+ break;
}
- }
-
- /* check if we can clear a parity disk reconstruct */
- if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
- test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
-
- clear_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending);
- clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
- clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
- clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
- }
-
+ dev = &sh->dev[s->failed_num];
+ /* fall through */
+ case check_state_compute_result:
+ sh->check_state = check_state_idle;
+ if (!dev)
+ dev = &sh->dev[sh->pd_idx];
+
+ /* check that a write has not made the stripe insync */
+ if (test_bit(STRIPE_INSYNC, &sh->state))
+ break;
- /* Wait for check parity and compute block operations to complete
- * before write-back. If a failure occurred while the check operation
- * was in flight we need to cycle this stripe through handle_stripe
- * since the parity block may not be uptodate
- */
- if (!canceled_check && !test_bit(STRIPE_INSYNC, &sh->state) &&
- !test_bit(STRIPE_OP_CHECK, &sh->ops.pending) &&
- !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) {
- struct r5dev *dev;
/* either failed parity check, or recovery is happening */
- if (s->failed == 0)
- s->failed_num = sh->pd_idx;
- dev = &sh->dev[s->failed_num];
BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
BUG_ON(s->uptodate != disks);
set_bit(R5_LOCKED, &dev->flags);
+ s->locked++;
set_bit(R5_Wantwrite, &dev->flags);
- if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
- sh->ops.count++;
clear_bit(STRIPE_DEGRADED, &sh->state);
- s->locked++;
set_bit(STRIPE_INSYNC, &sh->state);
+ break;
+ case check_state_run:
+ break; /* we will be called again upon completion */
+ case check_state_check_result:
+ sh->check_state = check_state_idle;
+
+ /* if a failure occurred during the check operation, leave
+ * STRIPE_INSYNC not set and let the stripe be handled again
+ */
+ if (s->failed)
+ break;
+
+ /* handle a successful check operation, if parity is correct
+ * we are done. Otherwise update the mismatch count and repair
+ * parity if !MD_RECOVERY_CHECK
+ */
+ if (sh->ops.zero_sum_result == 0)
+ /* parity is correct (on disc,
+ * not in buffer any more)
+ */
+ set_bit(STRIPE_INSYNC, &sh->state);
+ else {
+ conf->mddev->resync_mismatches += STRIPE_SECTORS;
+ if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+ /* don't try to repair!! */
+ set_bit(STRIPE_INSYNC, &sh->state);
+ else {
+ sh->check_state = check_state_compute_run;
+ set_bit(STRIPE_COMPUTE_RUN, &sh->state);
+ set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
+ set_bit(R5_Wantcompute,
+ &sh->dev[sh->pd_idx].flags);
+ sh->ops.target = sh->pd_idx;
+ s->uptodate++;
+ }
+ }
+ break;
+ case check_state_compute_run:
+ break;
+ default:
+ printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
+ __func__, sh->check_state,
+ (unsigned long long) sh->sector);
+ BUG();
}
}
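
The switch above walks a small per-stripe state machine kept in sh->check_state. The enum itself lives in raid5.h; it is assumed here to look roughly like the sketch below, with the state names taken from the cases above:

	enum check_states {
		check_state_idle = 0,
		check_state_run,		/* xor check operation in flight */
		check_state_check_result,	/* zero_sum_result is valid */
		check_state_compute_run,	/* parity repair compute in flight */
		check_state_compute_result,	/* repaired parity block is in cache */
	};
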
@@ -2634,22 +2536,21 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
*
*/
-static void handle_stripe5(struct stripe_head *sh)
+static bool handle_stripe5(struct stripe_head *sh)
{
raid5_conf_t *conf = sh->raid_conf;
int disks = sh->disks, i;
struct bio *return_bi = NULL;
struct stripe_head_state s;
struct r5dev *dev;
- unsigned long pending = 0;
mdk_rdev_t *blocked_rdev = NULL;
int prexor;
memset(&s, 0, sizeof(s));
- pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d "
- "ops=%lx:%lx:%lx\n", (unsigned long long)sh->sector, sh->state,
- atomic_read(&sh->count), sh->pd_idx,
- sh->ops.pending, sh->ops.ack, sh->ops.complete);
+ pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
+ "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
+ atomic_read(&sh->count), sh->pd_idx, sh->check_state,
+ sh->reconstruct_state);
spin_lock(&sh->lock);
clear_bit(STRIPE_HANDLE, &sh->state);
@@ -2658,15 +2559,8 @@ static void handle_stripe5(struct stripe_head *sh)
s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
- /* Now to look around and see what can be done */
-
- /* clean-up completed biofill operations */
- if (test_bit(STRIPE_OP_BIOFILL, &sh->ops.complete)) {
- clear_bit(STRIPE_OP_BIOFILL, &sh->ops.pending);
- clear_bit(STRIPE_OP_BIOFILL, &sh->ops.ack);
- clear_bit(STRIPE_OP_BIOFILL, &sh->ops.complete);
- }
+ /* Now to look around and see what can be done */
rcu_read_lock();
for (i=disks; i--; ) {
mdk_rdev_t *rdev;
@@ -2680,10 +2574,10 @@ static void handle_stripe5(struct stripe_head *sh)
/* maybe we can request a biofill operation
*
* new wantfill requests are only permitted while
- * STRIPE_OP_BIOFILL is clear
+ * ops_complete_biofill is guaranteed to be inactive
*/
if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
- !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
+ !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
set_bit(R5_Wantfill, &dev->flags);
/* now count some things */
@@ -2703,10 +2597,10 @@ static void handle_stripe5(struct stripe_head *sh)
if (dev->written)
s.written++;
rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+ if (blocked_rdev == NULL &&
+ rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
blocked_rdev = rdev;
atomic_inc(&rdev->nr_pending);
- break;
}
if (!rdev || !test_bit(In_sync, &rdev->flags)) {
/* The ReadError flag will just be confusing now */
@@ -2723,12 +2617,20 @@ static void handle_stripe5(struct stripe_head *sh)
rcu_read_unlock();
if (unlikely(blocked_rdev)) {
- set_bit(STRIPE_HANDLE, &sh->state);
- goto unlock;
+ if (s.syncing || s.expanding || s.expanded ||
+ s.to_write || s.written) {
+ set_bit(STRIPE_HANDLE, &sh->state);
+ goto unlock;
+ }
+ /* There is nothing for the blocked_rdev to block */
+ rdev_dec_pending(blocked_rdev, conf->mddev);
+ blocked_rdev = NULL;
}
- if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
- sh->ops.count++;
+ if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
+ set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
+ set_bit(STRIPE_BIOFILL_RUN, &sh->state);
+ }
pr_debug("locked=%d uptodate=%d to_read=%d"
" to_write=%d failed=%d failed_num=%d\n",
@@ -2738,8 +2640,7 @@ static void handle_stripe5(struct stripe_head *sh)
* need to be failed
*/
if (s.failed > 1 && s.to_read+s.to_write+s.written)
- handle_requests_to_failed_array(conf, sh, &s, disks,
- &return_bi);
+ handle_failed_stripe(conf, sh, &s, disks, &return_bi);
if (s.failed > 1 && s.syncing) {
md_done_sync(conf->mddev, STRIPE_SECTORS,0);
clear_bit(STRIPE_SYNCING, &sh->state);
@@ -2755,48 +2656,25 @@ static void handle_stripe5(struct stripe_head *sh)
!test_bit(R5_LOCKED, &dev->flags) &&
test_bit(R5_UPTODATE, &dev->flags)) ||
(s.failed == 1 && s.failed_num == sh->pd_idx)))
- handle_completed_write_requests(conf, sh, disks, &return_bi);
+ handle_stripe_clean_event(conf, sh, disks, &return_bi);
/* Now we might consider reading some blocks, either to check/generate
* parity, or to satisfy requests
* or to load a block that is being partially written.
*/
if (s.to_read || s.non_overwrite ||
- (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding ||
- test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
- handle_issuing_new_read_requests5(sh, &s, disks);
+ (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
+ handle_stripe_fill5(sh, &s, disks);
/* Now we check to see if any write operations have recently
* completed
*/
-
- /* leave prexor set until postxor is done, allows us to distinguish
- * a rmw from a rcw during biodrain
- */
prexor = 0;
- if (test_bit(STRIPE_OP_PREXOR, &sh->ops.complete) &&
- test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) {
-
+ if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
prexor = 1;
- clear_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
- clear_bit(STRIPE_OP_PREXOR, &sh->ops.ack);
- clear_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
-
- for (i = disks; i--; )
- clear_bit(R5_Wantprexor, &sh->dev[i].flags);
- }
-
- /* if only POSTXOR is set then this is an 'expand' postxor */
- if (test_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete) &&
- test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) {
-
- clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete);
- clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.ack);
- clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
-
- clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
- clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack);
- clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
+ if (sh->reconstruct_state == reconstruct_state_drain_result ||
+ sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
+ sh->reconstruct_state = reconstruct_state_idle;
/* All the 'written' buffers and the parity block are ready to
* be written back to disk
@@ -2808,9 +2686,6 @@ static void handle_stripe5(struct stripe_head *sh)
(i == sh->pd_idx || dev->written)) {
pr_debug("Writing block %d\n", i);
set_bit(R5_Wantwrite, &dev->flags);
- if (!test_and_set_bit(
- STRIPE_OP_IO, &sh->ops.pending))
- sh->ops.count++;
if (prexor)
continue;
if (!test_bit(R5_Insync, &dev->flags) ||
@@ -2832,20 +2707,18 @@ static void handle_stripe5(struct stripe_head *sh)
* 2/ A 'check' operation is in flight, as it may clobber the parity
* block.
*/
- if (s.to_write && !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending) &&
- !test_bit(STRIPE_OP_CHECK, &sh->ops.pending))
- handle_issuing_new_write_requests5(conf, sh, &s, disks);
+ if (s.to_write && !sh->reconstruct_state && !sh->check_state)
+ handle_stripe_dirtying5(conf, sh, &s, disks);
/* maybe we need to check and possibly fix the parity for this stripe
* Any reads will already have been scheduled, so we just see if enough
* data is available. The parity check is held off while parity
* dependent operations are in flight.
*/
- if ((s.syncing && s.locked == 0 &&
- !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) &&
- !test_bit(STRIPE_INSYNC, &sh->state)) ||
- test_bit(STRIPE_OP_CHECK, &sh->ops.pending) ||
- test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending))
+ if (sh->check_state ||
+ (s.syncing && s.locked == 0 &&
+ !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
+ !test_bit(STRIPE_INSYNC, &sh->state)))
handle_parity_checks5(conf, sh, &s, disks);
if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
@@ -2864,52 +2737,36 @@ static void handle_stripe5(struct stripe_head *sh)
dev = &sh->dev[s.failed_num];
if (!test_bit(R5_ReWrite, &dev->flags)) {
set_bit(R5_Wantwrite, &dev->flags);
- if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
- sh->ops.count++;
set_bit(R5_ReWrite, &dev->flags);
set_bit(R5_LOCKED, &dev->flags);
s.locked++;
} else {
/* let's read it back */
set_bit(R5_Wantread, &dev->flags);
- if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
- sh->ops.count++;
set_bit(R5_LOCKED, &dev->flags);
s.locked++;
}
}
- /* Finish postxor operations initiated by the expansion
- * process
- */
- if (test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete) &&
- !test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending)) {
-
+ /* Finish reconstruct operations initiated by the expansion process */
+ if (sh->reconstruct_state == reconstruct_state_result) {
+ sh->reconstruct_state = reconstruct_state_idle;
clear_bit(STRIPE_EXPANDING, &sh->state);
-
- clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
- clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack);
- clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
-
for (i = conf->raid_disks; i--; ) {
set_bit(R5_Wantwrite, &sh->dev[i].flags);
- set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_LOCKED, &sh->dev[i].flags);
s.locked++;
- if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
- sh->ops.count++;
}
}
if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
- !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
+ !sh->reconstruct_state) {
/* Need to write out all blocks after computing parity */
sh->disks = conf->raid_disks;
sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
conf->raid_disks);
- s.locked += handle_write_operations5(sh, 1, 1);
- } else if (s.expanded &&
- s.locked == 0 &&
- !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
+ schedule_reconstruction5(sh, &s, 1, 1);
+ } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
clear_bit(STRIPE_EXPAND_READY, &sh->state);
atomic_dec(&conf->reshape_stripes);
wake_up(&conf->wait_for_overlap);
@@ -2917,12 +2774,9 @@ static void handle_stripe5(struct stripe_head *sh)
}
if (s.expanding && s.locked == 0 &&
- !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
+ !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
handle_stripe_expansion(conf, sh, NULL);
- if (sh->ops.count)
- pending = get_stripe_work(sh);
-
unlock:
spin_unlock(&sh->lock);
@@ -2930,14 +2784,17 @@ static void handle_stripe5(struct stripe_head *sh)
if (unlikely(blocked_rdev))
md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
- if (pending)
- raid5_run_ops(sh, pending);
+ if (s.ops_request)
+ raid5_run_ops(sh, s.ops_request);
+
+ ops_run_io(sh, &s);
return_io(return_bi);
+ return blocked_rdev == NULL;
}
-static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
+static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
{
raid6_conf_t *conf = sh->raid_conf;
int disks = sh->disks;
@@ -2986,7 +2843,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
copy_data(0, rbi, dev->page, dev->sector);
rbi2 = r5_next_bio(rbi, dev->sector);
spin_lock_irq(&conf->device_lock);
- if (--rbi->bi_phys_segments == 0) {
+ if (!raid5_dec_bi_phys_segments(rbi)) {
rbi->bi_next = return_bi;
return_bi = rbi;
}
@@ -3010,10 +2867,10 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
if (dev->written)
s.written++;
rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+ if (blocked_rdev == NULL &&
+ rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
blocked_rdev = rdev;
atomic_inc(&rdev->nr_pending);
- break;
}
if (!rdev || !test_bit(In_sync, &rdev->flags)) {
/* The ReadError flag will just be confusing now */
@@ -3031,9 +2888,16 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
rcu_read_unlock();
if (unlikely(blocked_rdev)) {
- set_bit(STRIPE_HANDLE, &sh->state);
- goto unlock;
+ if (s.syncing || s.expanding || s.expanded ||
+ s.to_write || s.written) {
+ set_bit(STRIPE_HANDLE, &sh->state);
+ goto unlock;
+ }
+ /* There is nothing for the blocked_rdev to block */
+ rdev_dec_pending(blocked_rdev, conf->mddev);
+ blocked_rdev = NULL;
}
+
pr_debug("locked=%d uptodate=%d to_read=%d"
" to_write=%d failed=%d failed_num=%d,%d\n",
s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
@@ -3042,8 +2906,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
* might need to be failed
*/
if (s.failed > 2 && s.to_read+s.to_write+s.written)
- handle_requests_to_failed_array(conf, sh, &s, disks,
- &return_bi);
+ handle_failed_stripe(conf, sh, &s, disks, &return_bi);
if (s.failed > 2 && s.syncing) {
md_done_sync(conf->mddev, STRIPE_SECTORS,0);
clear_bit(STRIPE_SYNCING, &sh->state);
@@ -3068,7 +2931,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
&& !test_bit(R5_LOCKED, &qdev->flags)
&& test_bit(R5_UPTODATE, &qdev->flags)))))
- handle_completed_write_requests(conf, sh, disks, &return_bi);
+ handle_stripe_clean_event(conf, sh, disks, &return_bi);
/* Now we might consider reading some blocks, either to check/generate
* parity, or to satisfy requests
@@ -3076,11 +2939,11 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
*/
if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
(s.syncing && (s.uptodate < disks)) || s.expanding)
- handle_issuing_new_read_requests6(sh, &s, &r6s, disks);
+ handle_stripe_fill6(sh, &s, &r6s, disks);
/* now to consider writing and what else, if anything should be read */
if (s.to_write)
- handle_issuing_new_write_requests6(conf, sh, &s, &r6s, disks);
+ handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);
/* maybe we need to check and possibly fix the parity for this stripe
* Any reads will already have been scheduled, so we just see if enough
@@ -3136,7 +2999,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
}
if (s.expanding && s.locked == 0 &&
- !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
+ !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
handle_stripe_expansion(conf, sh, &r6s);
unlock:
@@ -3146,76 +3009,20 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
if (unlikely(blocked_rdev))
md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
- return_io(return_bi);
-
- for (i=disks; i-- ;) {
- int rw;
- struct bio *bi;
- mdk_rdev_t *rdev;
- if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
- rw = WRITE;
- else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
- rw = READ;
- else
- continue;
+ ops_run_io(sh, &s);
- set_bit(STRIPE_IO_STARTED, &sh->state);
-
- bi = &sh->dev[i].req;
-
- bi->bi_rw = rw;
- if (rw == WRITE)
- bi->bi_end_io = raid5_end_write_request;
- else
- bi->bi_end_io = raid5_end_read_request;
-
- rcu_read_lock();
- rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && test_bit(Faulty, &rdev->flags))
- rdev = NULL;
- if (rdev)
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
-
- if (rdev) {
- if (s.syncing || s.expanding || s.expanded)
- md_sync_acct(rdev->bdev, STRIPE_SECTORS);
+ return_io(return_bi);
- bi->bi_bdev = rdev->bdev;
- pr_debug("for %llu schedule op %ld on disc %d\n",
- (unsigned long long)sh->sector, bi->bi_rw, i);
- atomic_inc(&sh->count);
- bi->bi_sector = sh->sector + rdev->data_offset;
- bi->bi_flags = 1 << BIO_UPTODATE;
- bi->bi_vcnt = 1;
- bi->bi_max_vecs = 1;
- bi->bi_idx = 0;
- bi->bi_io_vec = &sh->dev[i].vec;
- bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
- bi->bi_io_vec[0].bv_offset = 0;
- bi->bi_size = STRIPE_SIZE;
- bi->bi_next = NULL;
- if (rw == WRITE &&
- test_bit(R5_ReWrite, &sh->dev[i].flags))
- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
- generic_make_request(bi);
- } else {
- if (rw == WRITE)
- set_bit(STRIPE_DEGRADED, &sh->state);
- pr_debug("skip op %ld on disc %d for sector %llu\n",
- bi->bi_rw, i, (unsigned long long)sh->sector);
- clear_bit(R5_LOCKED, &sh->dev[i].flags);
- set_bit(STRIPE_HANDLE, &sh->state);
- }
- }
+ return blocked_rdev == NULL;
}
-static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
+/* returns true if the stripe was handled */
+static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page)
{
if (sh->raid_conf->level == 6)
- handle_stripe6(sh, tmp_page);
+ return handle_stripe6(sh, tmp_page);
else
- handle_stripe5(sh);
+ return handle_stripe5(sh);
}
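
handle_stripe() now reports whether the stripe could actually be handled; false means an rdev was blocked and md_wait_for_blocked_rdev() was waited on. Callers that must guarantee forward progress simply retry, as the sync_request() hunk further down does:

	/* illustration of the new calling convention */
	while (unlikely(!handle_stripe(sh, NULL)))
		; /* a blocked rdev was waited for inside handle_stripe5/6 */
	release_stripe(sh);
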
@@ -3377,8 +3184,11 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
if(bi) {
conf->retry_read_aligned_list = bi->bi_next;
bi->bi_next = NULL;
+ /*
+ * this sets the active stripe count to 1 and the processed
+ * stripe count to zero (upper 16 bits of bi_phys_segments)
+ */
bi->bi_phys_segments = 1; /* biased count of active stripes */
- bi->bi_hw_segments = 0; /* count of processed stripes */
}
return bi;
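
The comment above refers to the new split-counter encoding of bi->bi_phys_segments. The helpers used throughout this patch (raid5_bi_hw_segments, raid5_dec_bi_phys_segments, raid5_set_bi_hw_segments) are defined in raid5.h; their assumed shape, shown here only as a sketch, packs the biased active-stripe count into the low 16 bits and the processed-stripe count into the high 16 bits:

	static inline int raid5_bi_phys_segments(struct bio *bio)
	{
		return bio->bi_phys_segments & 0xffff;	/* active stripes */
	}

	static inline int raid5_bi_hw_segments(struct bio *bio)
	{
		return (bio->bi_phys_segments >> 16) & 0xffff;	/* processed */
	}

	static inline int raid5_dec_bi_phys_segments(struct bio *bio)
	{
		--bio->bi_phys_segments;
		return raid5_bi_phys_segments(bio);
	}

	static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
	{
		bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
	}
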
@@ -3428,8 +3238,7 @@ static int bio_fits_rdev(struct bio *bi)
if ((bi->bi_size>>9) > q->max_sectors)
return 0;
blk_recount_segments(q, bi);
- if (bi->bi_phys_segments > q->max_phys_segments ||
- bi->bi_hw_segments > q->max_hw_segments)
+ if (bi->bi_phys_segments > q->max_phys_segments)
return 0;
if (q->merge_bvec_fn)
@@ -3573,7 +3382,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
sector_t logical_sector, last_sector;
struct stripe_head *sh;
const int rw = bio_data_dir(bi);
- int remaining;
+ int cpu, remaining;
if (unlikely(bio_barrier(bi))) {
bio_endio(bi, -EOPNOTSUPP);
@@ -3582,8 +3391,11 @@ static int make_request(struct request_queue *q, struct bio * bi)
md_write_start(mddev, bi);
- disk_stat_inc(mddev->gendisk, ios[rw]);
- disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bi));
+ part_stat_unlock();
if (rw == READ &&
mddev->reshape_position == MaxSector &&
@@ -3690,16 +3502,14 @@ static int make_request(struct request_queue *q, struct bio * bi)
}
spin_lock_irq(&conf->device_lock);
- remaining = --bi->bi_phys_segments;
+ remaining = raid5_dec_bi_phys_segments(bi);
spin_unlock_irq(&conf->device_lock);
if (remaining == 0) {
if ( rw == WRITE )
md_write_end(mddev);
- bi->bi_end_io(bi,
- test_bit(BIO_UPTODATE, &bi->bi_flags)
- ? 0 : -EIO);
+ bio_endio(bi, 0);
}
return 0;
}
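
The gendisk accounting switches from the old disk_stat_* helpers to the per-CPU partition statistics API; the pattern, as used in the hunk above (part0 being the whole-device "partition"), is:

	int cpu = part_stat_lock();	/* pins the current CPU for the update */
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], bio_sectors(bi));
	part_stat_unlock();
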
@@ -3785,7 +3595,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
j == raid6_next_disk(sh->pd_idx, sh->disks))
continue;
s = compute_blocknr(sh, j);
- if (s < (mddev->array_size<<1)) {
+ if (s < mddev->array_sectors) {
skipped = 1;
continue;
}
@@ -3935,7 +3745,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
clear_bit(STRIPE_INSYNC, &sh->state);
spin_unlock(&sh->lock);
- handle_stripe(sh, NULL);
+ /* wait for any blocked device to be handled */
+ while(unlikely(!handle_stripe(sh, NULL)))
+ ;
release_stripe(sh);
return STRIPE_SECTORS;
@@ -3974,7 +3786,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
sector += STRIPE_SECTORS,
scnt++) {
- if (scnt < raid_bio->bi_hw_segments)
+ if (scnt < raid5_bi_hw_segments(raid_bio))
/* already done this stripe */
continue;
@@ -3982,7 +3794,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
if (!sh) {
/* failed to get a stripe - must wait */
- raid_bio->bi_hw_segments = scnt;
+ raid5_set_bi_hw_segments(raid_bio, scnt);
conf->retry_read_aligned = raid_bio;
return handled;
}
@@ -3990,7 +3802,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
release_stripe(sh);
- raid_bio->bi_hw_segments = scnt;
+ raid5_set_bi_hw_segments(raid_bio, scnt);
conf->retry_read_aligned = raid_bio;
return handled;
}
@@ -4000,14 +3812,10 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
handled++;
}
spin_lock_irq(&conf->device_lock);
- remaining = --raid_bio->bi_phys_segments;
+ remaining = raid5_dec_bi_phys_segments(raid_bio);
spin_unlock_irq(&conf->device_lock);
- if (remaining == 0) {
-
- raid_bio->bi_end_io(raid_bio,
- test_bit(BIO_UPTODATE, &raid_bio->bi_flags)
- ? 0 : -EIO);
- }
+ if (remaining == 0)
+ bio_endio(raid_bio, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
return handled;
@@ -4058,10 +3866,8 @@ static void raid5d(mddev_t *mddev)
sh = __get_priority_stripe(conf);
- if (!sh) {
- async_tx_issue_pending_all();
+ if (!sh)
break;
- }
spin_unlock_irq(&conf->device_lock);
handled++;
@@ -4074,6 +3880,7 @@ static void raid5d(mddev_t *mddev)
spin_unlock_irq(&conf->device_lock);
+ async_tx_issue_pending_all();
unplug_slaves(mddev);
pr_debug("--- raid5d inactive\n");
@@ -4094,6 +3901,8 @@ raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
raid5_conf_t *conf = mddev_to_conf(mddev);
unsigned long new;
+ int err;
+
if (len >= PAGE_SIZE)
return -EINVAL;
if (!conf)
@@ -4109,7 +3918,9 @@ raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
else
break;
}
- md_allow_write(mddev);
+ err = md_allow_write(mddev);
+ if (err)
+ return err;
while (new > conf->max_nr_stripes) {
if (grow_one_stripe(conf))
conf->max_nr_stripes++;
@@ -4196,6 +4007,13 @@ static int run(mddev_t *mddev)
return -EIO;
}
+ if (mddev->chunk_size < PAGE_SIZE) {
+ printk(KERN_ERR "md/raid5: chunk_size must be at least "
+ "PAGE_SIZE but %d < %ld\n",
+ mddev->chunk_size, PAGE_SIZE);
+ return -EINVAL;
+ }
+
if (mddev->reshape_position != MaxSector) {
/* Check that we can continue the reshape.
* Currently only disks can change, it must
@@ -4434,7 +4252,7 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
- mddev->array_size = mddev->size * (conf->previous_raid_disks -
+ mddev->array_sectors = 2 * mddev->size * (conf->previous_raid_disks -
conf->max_degraded);
blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
@@ -4473,7 +4291,7 @@ static int stop(mddev_t *mddev)
}
#ifdef DEBUG
-static void print_sh (struct seq_file *seq, struct stripe_head *sh)
+static void print_sh(struct seq_file *seq, struct stripe_head *sh)
{
int i;
@@ -4489,7 +4307,7 @@ static void print_sh (struct seq_file *seq, struct stripe_head *sh)
seq_printf(seq, "\n");
}
-static void printall (struct seq_file *seq, raid5_conf_t *conf)
+static void printall(struct seq_file *seq, raid5_conf_t *conf)
{
struct stripe_head *sh;
struct hlist_node *hn;
@@ -4507,7 +4325,7 @@ static void printall (struct seq_file *seq, raid5_conf_t *conf)
}
#endif
-static void status (struct seq_file *seq, mddev_t *mddev)
+static void status(struct seq_file *seq, mddev_t *mddev)
{
raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
int i;
@@ -4609,35 +4427,41 @@ abort:
static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
raid5_conf_t *conf = mddev->private;
- int found = 0;
+ int err = -EEXIST;
int disk;
struct disk_info *p;
+ int first = 0;
+ int last = conf->raid_disks - 1;
if (mddev->degraded > conf->max_degraded)
/* no point adding a device */
- return 0;
+ return -EINVAL;
+
+ if (rdev->raid_disk >= 0)
+ first = last = rdev->raid_disk;
/*
* find the disk ... but prefer rdev->saved_raid_disk
* if possible.
*/
if (rdev->saved_raid_disk >= 0 &&
+ rdev->saved_raid_disk >= first &&
conf->disks[rdev->saved_raid_disk].rdev == NULL)
disk = rdev->saved_raid_disk;
else
- disk = 0;
- for ( ; disk < conf->raid_disks; disk++)
+ disk = first;
+ for ( ; disk <= last ; disk++)
if ((p=conf->disks + disk)->rdev == NULL) {
clear_bit(In_sync, &rdev->flags);
rdev->raid_disk = disk;
- found = 1;
+ err = 0;
if (rdev->saved_raid_disk != disk)
conf->fullsync = 1;
rcu_assign_pointer(p->rdev, rdev);
break;
}
print_raid5_conf(conf);
- return found;
+ return err;
}
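
raid5_add_disk() now follows the usual 0/-errno convention (-EINVAL when the array is already too degraded, -EEXIST when no slot in [first, last] is free) and honours a preselected rdev->raid_disk. Callers therefore test for zero rather than a boolean, as in this sketch mirroring the reshape hunk below:

	/* illustration of the new return convention */
	if (raid5_add_disk(mddev, rdev) == 0) {
		/* success: rdev->raid_disk holds the chosen slot */
		set_bit(In_sync, &rdev->flags);
	}
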
static int raid5_resize(mddev_t *mddev, sector_t sectors)
@@ -4652,8 +4476,9 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
raid5_conf_t *conf = mddev_to_conf(mddev);
sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
- mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1;
- set_capacity(mddev->gendisk, mddev->array_size << 1);
+ mddev->array_sectors = sectors * (mddev->raid_disks
+ - conf->max_degraded);
+ set_capacity(mddev->gendisk, mddev->array_sectors);
mddev->changed = 1;
if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
mddev->recovery_cp = mddev->size << 1;
@@ -4675,6 +4500,9 @@ static int raid5_check_reshape(mddev_t *mddev)
return -EINVAL; /* Cannot shrink array or change level yet */
if (mddev->delta_disks == 0)
return 0; /* nothing to do */
+ if (mddev->bitmap)
+ /* Cannot grow a bitmap yet */
+ return -EBUSY;
/* Can only proceed if there are plenty of stripe_heads.
 * We need a minimum of one full stripe, and for sensible progress
@@ -4738,7 +4566,7 @@ static int raid5_start_reshape(mddev_t *mddev)
rdev_for_each(rdev, rtmp, mddev)
if (rdev->raid_disk < 0 &&
!test_bit(Faulty, &rdev->flags)) {
- if (raid5_add_disk(mddev, rdev)) {
+ if (raid5_add_disk(mddev, rdev) == 0) {
char nm[20];
set_bit(In_sync, &rdev->flags);
added_devices++;
@@ -4786,15 +4614,16 @@ static void end_reshape(raid5_conf_t *conf)
struct block_device *bdev;
if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
- conf->mddev->array_size = conf->mddev->size *
+ conf->mddev->array_sectors = 2 * conf->mddev->size *
(conf->raid_disks - conf->max_degraded);
- set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1);
+ set_capacity(conf->mddev->gendisk, conf->mddev->array_sectors);
conf->mddev->changed = 1;
bdev = bdget_disk(conf->mddev->gendisk, 0);
if (bdev) {
mutex_lock(&bdev->bd_inode->i_mutex);
- i_size_write(bdev->bd_inode, (loff_t)conf->mddev->array_size << 10);
+ i_size_write(bdev->bd_inode,
+ (loff_t)conf->mddev->array_sectors << 9);
mutex_unlock(&bdev->bd_inode->i_mutex);
bdput(bdev);
}
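
The size bookkeeping in these hunks moves from array_size (counted in KiB) to array_sectors (512-byte sectors), which is why factors of two appear and the byte conversion becomes << 9 instead of << 10. A worked example with made-up figures:

	/* illustrative only: 4 data disks, mddev->size = 1048576 KiB (1 GiB) each */
	sector_t array_sectors = 2 * 1048576ULL * 4;	/* 8388608 sectors	*/
	loff_t bytes = (loff_t)array_sectors << 9;	/* 4294967296 = 4 GiB	*/
	/* old code: bytes = (loff_t)array_size << 10, with array_size in KiB */
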
diff --git a/drivers/md/raid6.h b/drivers/md/raid6.h
index 31cbee71365f..98dcde88470e 100644
--- a/drivers/md/raid6.h
+++ b/drivers/md/raid6.h
@@ -18,15 +18,6 @@
/* Set to 1 to use kernel-wide empty_zero_page */
#define RAID6_USE_EMPTY_ZERO_PAGE 0
-#include <linux/module.h>
-#include <linux/stddef.h>
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/mempool.h>
-#include <linux/list.h>
-#include <linux/vmalloc.h>
#include <linux/raid/md.h>
#include <linux/raid/raid5.h>