Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/bitmap.c | 1100
-rw-r--r-- | drivers/md/bitmap.h | 60
-rw-r--r-- | drivers/md/dm-mpath.c | 47
-rw-r--r-- | drivers/md/dm-raid.c | 22
-rw-r--r-- | drivers/md/dm-raid1.c | 3
-rw-r--r-- | drivers/md/dm-region-hash.c | 5
-rw-r--r-- | drivers/md/dm-thin-metadata.c | 136
-rw-r--r-- | drivers/md/dm-thin-metadata.h | 13
-rw-r--r-- | drivers/md/dm-thin.c | 216
-rw-r--r-- | drivers/md/md.c | 415
-rw-r--r-- | drivers/md/md.h | 12
-rw-r--r-- | drivers/md/multipath.c | 3
-rw-r--r-- | drivers/md/persistent-data/dm-space-map-checker.c | 54
-rw-r--r-- | drivers/md/persistent-data/dm-space-map-disk.c | 11
-rw-r--r-- | drivers/md/persistent-data/dm-transaction-manager.c | 13
-rw-r--r-- | drivers/md/raid1.c | 52
-rw-r--r-- | drivers/md/raid10.c | 1311
-rw-r--r-- | drivers/md/raid10.h | 34
-rw-r--r-- | drivers/md/raid5.c | 317
-rw-r--r-- | drivers/md/raid5.h | 7
20 files changed, 2860 insertions, 971 deletions
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 17e2b472e16d..15dbe03117e4 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -45,7 +45,7 @@ static inline char *bmname(struct bitmap *bitmap) * if we find our page, we increment the page's refcount so that it stays * allocated while we're using it */ -static int bitmap_checkpage(struct bitmap *bitmap, +static int bitmap_checkpage(struct bitmap_counts *bitmap, unsigned long page, int create) __releases(bitmap->lock) __acquires(bitmap->lock) @@ -76,8 +76,7 @@ __acquires(bitmap->lock) spin_lock_irq(&bitmap->lock); if (mappage == NULL) { - pr_debug("%s: bitmap map page allocation failed, hijacking\n", - bmname(bitmap)); + pr_debug("md/bitmap: map page allocation failed, hijacking\n"); /* failed - set the hijacked flag so that we can use the * pointer as a counter */ if (!bitmap->bp[page].map) @@ -100,7 +99,7 @@ __acquires(bitmap->lock) /* if page is completely empty, put it back on the free list, or dealloc it */ /* if page was hijacked, unmark the flag so it might get alloced next time */ /* Note: lock should be held when calling this */ -static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page) +static void bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page) { char *ptr; @@ -130,22 +129,14 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page) */ /* IO operations when bitmap is stored near all superblocks */ -static struct page *read_sb_page(struct mddev *mddev, loff_t offset, - struct page *page, - unsigned long index, int size) +static int read_sb_page(struct mddev *mddev, loff_t offset, + struct page *page, + unsigned long index, int size) { /* choose a good rdev and read the page from there */ struct md_rdev *rdev; sector_t target; - int did_alloc = 0; - - if (!page) { - page = alloc_page(GFP_KERNEL); - if (!page) - return ERR_PTR(-ENOMEM); - did_alloc = 1; - } rdev_for_each(rdev, mddev) { if (! test_bit(In_sync, &rdev->flags) @@ -158,15 +149,10 @@ static struct page *read_sb_page(struct mddev *mddev, loff_t offset, roundup(size, bdev_logical_block_size(rdev->bdev)), page, READ, true)) { page->index = index; - attach_page_buffers(page, NULL); /* so that free_buffer will - * quietly no-op */ - return page; + return 0; } } - if (did_alloc) - put_page(page); - return ERR_PTR(-EIO); - + return -EIO; } static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev) @@ -208,6 +194,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) struct md_rdev *rdev = NULL; struct block_device *bdev; struct mddev *mddev = bitmap->mddev; + struct bitmap_storage *store = &bitmap->storage; while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { int size = PAGE_SIZE; @@ -215,9 +202,13 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) bdev = (rdev->meta_bdev) ? 
rdev->meta_bdev : rdev->bdev; - if (page->index == bitmap->file_pages-1) - size = roundup(bitmap->last_page_size, + if (page->index == store->file_pages-1) { + int last_page_size = store->bytes & (PAGE_SIZE-1); + if (last_page_size == 0) + last_page_size = PAGE_SIZE; + size = roundup(last_page_size, bdev_logical_block_size(bdev)); + } /* Just make sure we aren't corrupting data or * metadata */ @@ -276,10 +267,10 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait) { struct buffer_head *bh; - if (bitmap->file == NULL) { + if (bitmap->storage.file == NULL) { switch (write_sb_page(bitmap, page, wait)) { case -EINVAL: - bitmap->flags |= BITMAP_WRITE_ERROR; + set_bit(BITMAP_WRITE_ERROR, &bitmap->flags); } } else { @@ -297,20 +288,16 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait) wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes)==0); } - if (bitmap->flags & BITMAP_WRITE_ERROR) + if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) bitmap_file_kick(bitmap); } static void end_bitmap_write(struct buffer_head *bh, int uptodate) { struct bitmap *bitmap = bh->b_private; - unsigned long flags; - if (!uptodate) { - spin_lock_irqsave(&bitmap->lock, flags); - bitmap->flags |= BITMAP_WRITE_ERROR; - spin_unlock_irqrestore(&bitmap->lock, flags); - } + if (!uptodate) + set_bit(BITMAP_WRITE_ERROR, &bitmap->flags); if (atomic_dec_and_test(&bitmap->pending_writes)) wake_up(&bitmap->write_wait); } @@ -325,8 +312,12 @@ __clear_page_buffers(struct page *page) } static void free_buffers(struct page *page) { - struct buffer_head *bh = page_buffers(page); + struct buffer_head *bh; + if (!PagePrivate(page)) + return; + + bh = page_buffers(page); while (bh) { struct buffer_head *next = bh->b_this_page; free_buffer_head(bh); @@ -343,11 +334,12 @@ static void free_buffers(struct page *page) * This usage is similar to how swap files are handled, and allows us * to write to a file with no concerns of memory allocation failing. */ -static struct page *read_page(struct file *file, unsigned long index, - struct bitmap *bitmap, - unsigned long count) +static int read_page(struct file *file, unsigned long index, + struct bitmap *bitmap, + unsigned long count, + struct page *page) { - struct page *page = NULL; + int ret = 0; struct inode *inode = file->f_path.dentry->d_inode; struct buffer_head *bh; sector_t block; @@ -355,16 +347,9 @@ static struct page *read_page(struct file *file, unsigned long index, pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE, (unsigned long long)index << PAGE_SHIFT); - page = alloc_page(GFP_KERNEL); - if (!page) - page = ERR_PTR(-ENOMEM); - if (IS_ERR(page)) - goto out; - bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0); if (!bh) { - put_page(page); - page = ERR_PTR(-ENOMEM); + ret = -ENOMEM; goto out; } attach_page_buffers(page, bh); @@ -376,8 +361,7 @@ static struct page *read_page(struct file *file, unsigned long index, bh->b_blocknr = bmap(inode, block); if (bh->b_blocknr == 0) { /* Cannot use this file! 
*/ - free_buffers(page); - page = ERR_PTR(-EINVAL); + ret = -EINVAL; goto out; } bh->b_bdev = inode->i_sb->s_bdev; @@ -400,17 +384,15 @@ static struct page *read_page(struct file *file, unsigned long index, wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes)==0); - if (bitmap->flags & BITMAP_WRITE_ERROR) { - free_buffers(page); - page = ERR_PTR(-EIO); - } + if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) + ret = -EIO; out: - if (IS_ERR(page)) - printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %ld\n", + if (ret) + printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %d\n", (int)PAGE_SIZE, (unsigned long long)index << PAGE_SHIFT, - PTR_ERR(page)); - return page; + ret); + return ret; } /* @@ -426,9 +408,9 @@ void bitmap_update_sb(struct bitmap *bitmap) return; if (bitmap->mddev->bitmap_info.external) return; - if (!bitmap->sb_page) /* no superblock */ + if (!bitmap->storage.sb_page) /* no superblock */ return; - sb = kmap_atomic(bitmap->sb_page); + sb = kmap_atomic(bitmap->storage.sb_page); sb->events = cpu_to_le64(bitmap->mddev->events); if (bitmap->mddev->events < bitmap->events_cleared) /* rocking back to read-only */ @@ -438,8 +420,13 @@ void bitmap_update_sb(struct bitmap *bitmap) /* Just in case these have been changed via sysfs: */ sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ); sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind); + /* This might have been changed by a reshape */ + sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); + sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize); + sb->sectors_reserved = cpu_to_le32(bitmap->mddev-> + bitmap_info.space); kunmap_atomic(sb); - write_page(bitmap, bitmap->sb_page, 1); + write_page(bitmap, bitmap->storage.sb_page, 1); } /* print out the bitmap file superblock */ @@ -447,9 +434,9 @@ void bitmap_print_sb(struct bitmap *bitmap) { bitmap_super_t *sb; - if (!bitmap || !bitmap->sb_page) + if (!bitmap || !bitmap->storage.sb_page) return; - sb = kmap_atomic(bitmap->sb_page); + sb = kmap_atomic(bitmap->storage.sb_page); printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap)); printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic)); printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version)); @@ -488,15 +475,15 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap) unsigned long chunksize, daemon_sleep, write_behind; int err = -EINVAL; - bitmap->sb_page = alloc_page(GFP_KERNEL); - if (IS_ERR(bitmap->sb_page)) { - err = PTR_ERR(bitmap->sb_page); - bitmap->sb_page = NULL; + bitmap->storage.sb_page = alloc_page(GFP_KERNEL); + if (IS_ERR(bitmap->storage.sb_page)) { + err = PTR_ERR(bitmap->storage.sb_page); + bitmap->storage.sb_page = NULL; return err; } - bitmap->sb_page->index = 0; + bitmap->storage.sb_page->index = 0; - sb = kmap_atomic(bitmap->sb_page); + sb = kmap_atomic(bitmap->storage.sb_page); sb->magic = cpu_to_le32(BITMAP_MAGIC); sb->version = cpu_to_le32(BITMAP_MAJOR_HI); @@ -534,8 +521,8 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap) memcpy(sb->uuid, bitmap->mddev->uuid, 16); - bitmap->flags |= BITMAP_STALE; - sb->state |= cpu_to_le32(BITMAP_STALE); + set_bit(BITMAP_STALE, &bitmap->flags); + sb->state = cpu_to_le32(bitmap->flags); bitmap->events_cleared = bitmap->mddev->events; sb->events_cleared = cpu_to_le64(bitmap->mddev->events); @@ -551,31 +538,45 @@ static int bitmap_read_sb(struct bitmap *bitmap) bitmap_super_t *sb; unsigned long chunksize, daemon_sleep, write_behind; unsigned long long events; + 
unsigned long sectors_reserved = 0; int err = -EINVAL; + struct page *sb_page; + if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { + chunksize = 128 * 1024 * 1024; + daemon_sleep = 5 * HZ; + write_behind = 0; + set_bit(BITMAP_STALE, &bitmap->flags); + err = 0; + goto out_no_sb; + } /* page 0 is the superblock, read it... */ - if (bitmap->file) { - loff_t isize = i_size_read(bitmap->file->f_mapping->host); + sb_page = alloc_page(GFP_KERNEL); + if (!sb_page) + return -ENOMEM; + bitmap->storage.sb_page = sb_page; + + if (bitmap->storage.file) { + loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host); int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize; - bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes); + err = read_page(bitmap->storage.file, 0, + bitmap, bytes, sb_page); } else { - bitmap->sb_page = read_sb_page(bitmap->mddev, - bitmap->mddev->bitmap_info.offset, - NULL, - 0, sizeof(bitmap_super_t)); + err = read_sb_page(bitmap->mddev, + bitmap->mddev->bitmap_info.offset, + sb_page, + 0, sizeof(bitmap_super_t)); } - if (IS_ERR(bitmap->sb_page)) { - err = PTR_ERR(bitmap->sb_page); - bitmap->sb_page = NULL; + if (err) return err; - } - sb = kmap_atomic(bitmap->sb_page); + sb = kmap_atomic(sb_page); chunksize = le32_to_cpu(sb->chunksize); daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; write_behind = le32_to_cpu(sb->write_behind); + sectors_reserved = le32_to_cpu(sb->sectors_reserved); /* verify that the bitmap-specific fields are valid */ if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) @@ -618,60 +619,32 @@ static int bitmap_read_sb(struct bitmap *bitmap) "-- forcing full recovery\n", bmname(bitmap), events, (unsigned long long) bitmap->mddev->events); - sb->state |= cpu_to_le32(BITMAP_STALE); + set_bit(BITMAP_STALE, &bitmap->flags); } } /* assign fields using values from superblock */ - bitmap->mddev->bitmap_info.chunksize = chunksize; - bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; - bitmap->mddev->bitmap_info.max_write_behind = write_behind; bitmap->flags |= le32_to_cpu(sb->state); if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN) - bitmap->flags |= BITMAP_HOSTENDIAN; + set_bit(BITMAP_HOSTENDIAN, &bitmap->flags); bitmap->events_cleared = le64_to_cpu(sb->events_cleared); - if (bitmap->flags & BITMAP_STALE) - bitmap->events_cleared = bitmap->mddev->events; err = 0; out: kunmap_atomic(sb); +out_no_sb: + if (test_bit(BITMAP_STALE, &bitmap->flags)) + bitmap->events_cleared = bitmap->mddev->events; + bitmap->mddev->bitmap_info.chunksize = chunksize; + bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; + bitmap->mddev->bitmap_info.max_write_behind = write_behind; + if (bitmap->mddev->bitmap_info.space == 0 || + bitmap->mddev->bitmap_info.space > sectors_reserved) + bitmap->mddev->bitmap_info.space = sectors_reserved; if (err) bitmap_print_sb(bitmap); return err; } -enum bitmap_mask_op { - MASK_SET, - MASK_UNSET -}; - -/* record the state of the bitmap in the superblock. 
Return the old value */ -static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits, - enum bitmap_mask_op op) -{ - bitmap_super_t *sb; - int old; - - if (!bitmap->sb_page) /* can't set the state */ - return 0; - sb = kmap_atomic(bitmap->sb_page); - old = le32_to_cpu(sb->state) & bits; - switch (op) { - case MASK_SET: - sb->state |= cpu_to_le32(bits); - bitmap->flags |= bits; - break; - case MASK_UNSET: - sb->state &= cpu_to_le32(~bits); - bitmap->flags &= ~bits; - break; - default: - BUG(); - } - kunmap_atomic(sb); - return old; -} - /* * general bitmap file operations */ @@ -683,17 +656,19 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits, * file a page at a time. There's a superblock at the start of the file. */ /* calculate the index of the page that contains this bit */ -static inline unsigned long file_page_index(struct bitmap *bitmap, unsigned long chunk) +static inline unsigned long file_page_index(struct bitmap_storage *store, + unsigned long chunk) { - if (!bitmap->mddev->bitmap_info.external) + if (store->sb_page) chunk += sizeof(bitmap_super_t) << 3; return chunk >> PAGE_BIT_SHIFT; } /* calculate the (bit) offset of this bit within a page */ -static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned long chunk) +static inline unsigned long file_page_offset(struct bitmap_storage *store, + unsigned long chunk) { - if (!bitmap->mddev->bitmap_info.external) + if (store->sb_page) chunk += sizeof(bitmap_super_t) << 3; return chunk & (PAGE_BITS - 1); } @@ -705,57 +680,86 @@ static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned lon * 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page * 0 or page 1 */ -static inline struct page *filemap_get_page(struct bitmap *bitmap, +static inline struct page *filemap_get_page(struct bitmap_storage *store, unsigned long chunk) { - if (file_page_index(bitmap, chunk) >= bitmap->file_pages) + if (file_page_index(store, chunk) >= store->file_pages) return NULL; - return bitmap->filemap[file_page_index(bitmap, chunk) - - file_page_index(bitmap, 0)]; + return store->filemap[file_page_index(store, chunk) + - file_page_index(store, 0)]; } -static void bitmap_file_unmap(struct bitmap *bitmap) +static int bitmap_storage_alloc(struct bitmap_storage *store, + unsigned long chunks, int with_super) +{ + int pnum; + unsigned long num_pages; + unsigned long bytes; + + bytes = DIV_ROUND_UP(chunks, 8); + if (with_super) + bytes += sizeof(bitmap_super_t); + + num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE); + + store->filemap = kmalloc(sizeof(struct page *) + * num_pages, GFP_KERNEL); + if (!store->filemap) + return -ENOMEM; + + if (with_super && !store->sb_page) { + store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO); + if (store->sb_page == NULL) + return -ENOMEM; + store->sb_page->index = 0; + } + pnum = 0; + if (store->sb_page) { + store->filemap[0] = store->sb_page; + pnum = 1; + } + for ( ; pnum < num_pages; pnum++) { + store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO); + if (!store->filemap[pnum]) { + store->file_pages = pnum; + return -ENOMEM; + } + store->filemap[pnum]->index = pnum; + } + store->file_pages = pnum; + + /* We need 4 bits per page, rounded up to a multiple + * of sizeof(unsigned long) */ + store->filemap_attr = kzalloc( + roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)), + GFP_KERNEL); + if (!store->filemap_attr) + return -ENOMEM; + + store->bytes = bytes; + + return 0; +} + +static void bitmap_file_unmap(struct 
bitmap_storage *store) { struct page **map, *sb_page; - unsigned long *attr; int pages; - unsigned long flags; + struct file *file; - spin_lock_irqsave(&bitmap->lock, flags); - map = bitmap->filemap; - bitmap->filemap = NULL; - attr = bitmap->filemap_attr; - bitmap->filemap_attr = NULL; - pages = bitmap->file_pages; - bitmap->file_pages = 0; - sb_page = bitmap->sb_page; - bitmap->sb_page = NULL; - spin_unlock_irqrestore(&bitmap->lock, flags); + file = store->file; + map = store->filemap; + pages = store->file_pages; + sb_page = store->sb_page; while (pages--) if (map[pages] != sb_page) /* 0 is sb_page, release it below */ free_buffers(map[pages]); kfree(map); - kfree(attr); + kfree(store->filemap_attr); if (sb_page) free_buffers(sb_page); -} - -static void bitmap_file_put(struct bitmap *bitmap) -{ - struct file *file; - unsigned long flags; - - spin_lock_irqsave(&bitmap->lock, flags); - file = bitmap->file; - bitmap->file = NULL; - spin_unlock_irqrestore(&bitmap->lock, flags); - - if (file) - wait_event(bitmap->write_wait, - atomic_read(&bitmap->pending_writes)==0); - bitmap_file_unmap(bitmap); if (file) { struct inode *inode = file->f_path.dentry->d_inode; @@ -773,14 +777,14 @@ static void bitmap_file_kick(struct bitmap *bitmap) { char *path, *ptr = NULL; - if (bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET) == 0) { + if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) { bitmap_update_sb(bitmap); - if (bitmap->file) { + if (bitmap->storage.file) { path = kmalloc(PAGE_SIZE, GFP_KERNEL); if (path) - ptr = d_path(&bitmap->file->f_path, path, - PAGE_SIZE); + ptr = d_path(&bitmap->storage.file->f_path, + path, PAGE_SIZE); printk(KERN_ALERT "%s: kicking failed bitmap file %s from array!\n", @@ -792,10 +796,6 @@ static void bitmap_file_kick(struct bitmap *bitmap) "%s: disabling internal bitmap due to errors\n", bmname(bitmap)); } - - bitmap_file_put(bitmap); - - return; } enum bitmap_page_attr { @@ -805,24 +805,30 @@ enum bitmap_page_attr { BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */ }; -static inline void set_page_attr(struct bitmap *bitmap, struct page *page, - enum bitmap_page_attr attr) +static inline void set_page_attr(struct bitmap *bitmap, int pnum, + enum bitmap_page_attr attr) { - __set_bit((page->index<<2) + attr, bitmap->filemap_attr); + set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); } -static inline void clear_page_attr(struct bitmap *bitmap, struct page *page, - enum bitmap_page_attr attr) +static inline void clear_page_attr(struct bitmap *bitmap, int pnum, + enum bitmap_page_attr attr) { - __clear_bit((page->index<<2) + attr, bitmap->filemap_attr); + clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); } -static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page, - enum bitmap_page_attr attr) +static inline int test_page_attr(struct bitmap *bitmap, int pnum, + enum bitmap_page_attr attr) { - return test_bit((page->index<<2) + attr, bitmap->filemap_attr); + return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); } +static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum, + enum bitmap_page_attr attr) +{ + return test_and_clear_bit((pnum<<2) + attr, + bitmap->storage.filemap_attr); +} /* * bitmap_file_set_bit -- called before performing a write to the md device * to set (and eventually sync) a particular bit in the bitmap file @@ -835,26 +841,46 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) unsigned long bit; struct page *page; void *kaddr; - 
unsigned long chunk = block >> bitmap->chunkshift; + unsigned long chunk = block >> bitmap->counts.chunkshift; - if (!bitmap->filemap) - return; - - page = filemap_get_page(bitmap, chunk); + page = filemap_get_page(&bitmap->storage, chunk); if (!page) return; - bit = file_page_offset(bitmap, chunk); + bit = file_page_offset(&bitmap->storage, chunk); /* set the bit */ kaddr = kmap_atomic(page); - if (bitmap->flags & BITMAP_HOSTENDIAN) + if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) set_bit(bit, kaddr); else - __set_bit_le(bit, kaddr); + test_and_set_bit_le(bit, kaddr); kunmap_atomic(kaddr); pr_debug("set file bit %lu page %lu\n", bit, page->index); /* record page number so it gets flushed to disk when unplug occurs */ - set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); + set_page_attr(bitmap, page->index, BITMAP_PAGE_DIRTY); +} + +static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block) +{ + unsigned long bit; + struct page *page; + void *paddr; + unsigned long chunk = block >> bitmap->counts.chunkshift; + + page = filemap_get_page(&bitmap->storage, chunk); + if (!page) + return; + bit = file_page_offset(&bitmap->storage, chunk); + paddr = kmap_atomic(page); + if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) + clear_bit(bit, paddr); + else + test_and_clear_bit_le(bit, paddr); + kunmap_atomic(paddr); + if (!test_page_attr(bitmap, page->index, BITMAP_PAGE_NEEDWRITE)) { + set_page_attr(bitmap, page->index, BITMAP_PAGE_PENDING); + bitmap->allclean = 0; + } } /* this gets called when the md device is ready to unplug its underlying @@ -862,42 +888,37 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) * sync the dirty pages of the bitmap file to disk */ void bitmap_unplug(struct bitmap *bitmap) { - unsigned long i, flags; + unsigned long i; int dirty, need_write; - struct page *page; int wait = 0; - if (!bitmap) + if (!bitmap || !bitmap->storage.filemap || + test_bit(BITMAP_STALE, &bitmap->flags)) return; /* look at each page to see if there are any set bits that need to be * flushed out to disk */ - for (i = 0; i < bitmap->file_pages; i++) { - spin_lock_irqsave(&bitmap->lock, flags); - if (!bitmap->filemap) { - spin_unlock_irqrestore(&bitmap->lock, flags); + for (i = 0; i < bitmap->storage.file_pages; i++) { + if (!bitmap->storage.filemap) return; + dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY); + need_write = test_and_clear_page_attr(bitmap, i, + BITMAP_PAGE_NEEDWRITE); + if (dirty || need_write) { + clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING); + write_page(bitmap, bitmap->storage.filemap[i], 0); } - page = bitmap->filemap[i]; - dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); - need_write = test_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); - clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); - clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); if (dirty) wait = 1; - spin_unlock_irqrestore(&bitmap->lock, flags); - - if (dirty || need_write) - write_page(bitmap, page, 0); } if (wait) { /* if any writes were performed, we need to wait on them */ - if (bitmap->file) + if (bitmap->storage.file) wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes)==0); else md_super_wait(bitmap->mddev); } - if (bitmap->flags & BITMAP_WRITE_ERROR) + if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) bitmap_file_kick(bitmap); } EXPORT_SYMBOL(bitmap_unplug); @@ -917,98 +938,77 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) { 
unsigned long i, chunks, index, oldindex, bit; - struct page *page = NULL, *oldpage = NULL; - unsigned long num_pages, bit_cnt = 0; + struct page *page = NULL; + unsigned long bit_cnt = 0; struct file *file; - unsigned long bytes, offset; + unsigned long offset; int outofdate; int ret = -ENOSPC; void *paddr; + struct bitmap_storage *store = &bitmap->storage; - chunks = bitmap->chunks; - file = bitmap->file; + chunks = bitmap->counts.chunks; + file = store->file; - BUG_ON(!file && !bitmap->mddev->bitmap_info.offset); + if (!file && !bitmap->mddev->bitmap_info.offset) { + /* No permanent bitmap - fill with '1s'. */ + store->filemap = NULL; + store->file_pages = 0; + for (i = 0; i < chunks ; i++) { + /* if the disk bit is set, set the memory bit */ + int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift) + >= start); + bitmap_set_memory_bits(bitmap, + (sector_t)i << bitmap->counts.chunkshift, + needed); + } + return 0; + } - outofdate = bitmap->flags & BITMAP_STALE; + outofdate = test_bit(BITMAP_STALE, &bitmap->flags); if (outofdate) printk(KERN_INFO "%s: bitmap file is out of date, doing full " "recovery\n", bmname(bitmap)); - bytes = DIV_ROUND_UP(bitmap->chunks, 8); - if (!bitmap->mddev->bitmap_info.external) - bytes += sizeof(bitmap_super_t); - - num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE); - - if (file && i_size_read(file->f_mapping->host) < bytes) { + if (file && i_size_read(file->f_mapping->host) < store->bytes) { printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n", - bmname(bitmap), - (unsigned long) i_size_read(file->f_mapping->host), - bytes); + bmname(bitmap), + (unsigned long) i_size_read(file->f_mapping->host), + store->bytes); goto err; } - ret = -ENOMEM; - - bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL); - if (!bitmap->filemap) - goto err; - - /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */ - bitmap->filemap_attr = kzalloc( - roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)), - GFP_KERNEL); - if (!bitmap->filemap_attr) - goto err; - oldindex = ~0L; + offset = 0; + if (!bitmap->mddev->bitmap_info.external) + offset = sizeof(bitmap_super_t); for (i = 0; i < chunks; i++) { int b; - index = file_page_index(bitmap, i); - bit = file_page_offset(bitmap, i); + index = file_page_index(&bitmap->storage, i); + bit = file_page_offset(&bitmap->storage, i); if (index != oldindex) { /* this is a new page, read it in */ int count; /* unmap the old page, we're done with it */ - if (index == num_pages-1) - count = bytes - index * PAGE_SIZE; + if (index == store->file_pages-1) + count = store->bytes - index * PAGE_SIZE; else count = PAGE_SIZE; - if (index == 0 && bitmap->sb_page) { - /* - * if we're here then the superblock page - * contains some bits (PAGE_SIZE != sizeof sb) - * we've already read it in, so just use it - */ - page = bitmap->sb_page; - offset = sizeof(bitmap_super_t); - if (!file) - page = read_sb_page( - bitmap->mddev, - bitmap->mddev->bitmap_info.offset, - page, - index, count); - } else if (file) { - page = read_page(file, index, bitmap, count); - offset = 0; - } else { - page = read_sb_page(bitmap->mddev, - bitmap->mddev->bitmap_info.offset, - NULL, - index, count); - offset = 0; - } - if (IS_ERR(page)) { /* read error */ - ret = PTR_ERR(page); + page = store->filemap[index]; + if (file) + ret = read_page(file, index, bitmap, + count, page); + else + ret = read_sb_page( + bitmap->mddev, + bitmap->mddev->bitmap_info.offset, + page, + index, count); + + if (ret) goto err; - } oldindex = index; - 
oldpage = page; - - bitmap->filemap[bitmap->file_pages++] = page; - bitmap->last_page_size = count; if (outofdate) { /* @@ -1022,39 +1022,33 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) write_page(bitmap, page, 1); ret = -EIO; - if (bitmap->flags & BITMAP_WRITE_ERROR) + if (test_bit(BITMAP_WRITE_ERROR, + &bitmap->flags)) goto err; } } paddr = kmap_atomic(page); - if (bitmap->flags & BITMAP_HOSTENDIAN) + if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) b = test_bit(bit, paddr); else b = test_bit_le(bit, paddr); kunmap_atomic(paddr); if (b) { /* if the disk bit is set, set the memory bit */ - int needed = ((sector_t)(i+1) << bitmap->chunkshift + int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift >= start); bitmap_set_memory_bits(bitmap, - (sector_t)i << bitmap->chunkshift, + (sector_t)i << bitmap->counts.chunkshift, needed); bit_cnt++; } - } - - /* everything went OK */ - ret = 0; - bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET); - - if (bit_cnt) { /* Kick recovery if any bits were set */ - set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery); - md_wakeup_thread(bitmap->mddev->thread); + offset = 0; } printk(KERN_INFO "%s: bitmap initialized from disk: " - "read %lu/%lu pages, set %lu of %lu bits\n", - bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, chunks); + "read %lu pages, set %lu of %lu bits\n", + bmname(bitmap), store->file_pages, + bit_cnt, chunks); return 0; @@ -1071,22 +1065,38 @@ void bitmap_write_all(struct bitmap *bitmap) */ int i; - spin_lock_irq(&bitmap->lock); - for (i = 0; i < bitmap->file_pages; i++) - set_page_attr(bitmap, bitmap->filemap[i], + if (!bitmap || !bitmap->storage.filemap) + return; + if (bitmap->storage.file) + /* Only one copy, so nothing needed */ + return; + + for (i = 0; i < bitmap->storage.file_pages; i++) + set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE); bitmap->allclean = 0; - spin_unlock_irq(&bitmap->lock); } -static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc) +static void bitmap_count_page(struct bitmap_counts *bitmap, + sector_t offset, int inc) { sector_t chunk = offset >> bitmap->chunkshift; unsigned long page = chunk >> PAGE_COUNTER_SHIFT; bitmap->bp[page].count += inc; bitmap_checkfree(bitmap, page); } -static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, + +static void bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset) +{ + sector_t chunk = offset >> bitmap->chunkshift; + unsigned long page = chunk >> PAGE_COUNTER_SHIFT; + struct bitmap_page *bp = &bitmap->bp[page]; + + if (!bp->pending) + bp->pending = 1; +} + +static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap, sector_t offset, sector_t *blocks, int create); @@ -1099,10 +1109,9 @@ void bitmap_daemon_work(struct mddev *mddev) { struct bitmap *bitmap; unsigned long j; - unsigned long flags; - struct page *page = NULL, *lastpage = NULL; + unsigned long nextpage; sector_t blocks; - void *paddr; + struct bitmap_counts *counts; /* Use a mutex to guard daemon_work against * bitmap_destroy. @@ -1124,112 +1133,90 @@ void bitmap_daemon_work(struct mddev *mddev) } bitmap->allclean = 1; - spin_lock_irqsave(&bitmap->lock, flags); - for (j = 0; j < bitmap->chunks; j++) { + /* Any file-page which is PENDING now needs to be written. + * So set NEEDWRITE now, then after we make any last-minute changes + * we will write it. 
+ */ + for (j = 0; j < bitmap->storage.file_pages; j++) + if (test_and_clear_page_attr(bitmap, j, + BITMAP_PAGE_PENDING)) + set_page_attr(bitmap, j, + BITMAP_PAGE_NEEDWRITE); + + if (bitmap->need_sync && + mddev->bitmap_info.external == 0) { + /* Arrange for superblock update as well as + * other changes */ + bitmap_super_t *sb; + bitmap->need_sync = 0; + if (bitmap->storage.filemap) { + sb = kmap_atomic(bitmap->storage.sb_page); + sb->events_cleared = + cpu_to_le64(bitmap->events_cleared); + kunmap_atomic(sb); + set_page_attr(bitmap, 0, + BITMAP_PAGE_NEEDWRITE); + } + } + /* Now look at the bitmap counters and if any are '2' or '1', + * decrement and handle accordingly. + */ + counts = &bitmap->counts; + spin_lock_irq(&counts->lock); + nextpage = 0; + for (j = 0; j < counts->chunks; j++) { bitmap_counter_t *bmc; - if (!bitmap->filemap) - /* error or shutdown */ - break; + sector_t block = (sector_t)j << counts->chunkshift; - page = filemap_get_page(bitmap, j); - - if (page != lastpage) { - /* skip this page unless it's marked as needing cleaning */ - if (!test_page_attr(bitmap, page, BITMAP_PAGE_PENDING)) { - int need_write = test_page_attr(bitmap, page, - BITMAP_PAGE_NEEDWRITE); - if (need_write) - clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); - - spin_unlock_irqrestore(&bitmap->lock, flags); - if (need_write) - write_page(bitmap, page, 0); - spin_lock_irqsave(&bitmap->lock, flags); - j |= (PAGE_BITS - 1); + if (j == nextpage) { + nextpage += PAGE_COUNTER_RATIO; + if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) { + j |= PAGE_COUNTER_MASK; continue; } - - /* grab the new page, sync and release the old */ - if (lastpage != NULL) { - if (test_page_attr(bitmap, lastpage, - BITMAP_PAGE_NEEDWRITE)) { - clear_page_attr(bitmap, lastpage, - BITMAP_PAGE_NEEDWRITE); - spin_unlock_irqrestore(&bitmap->lock, flags); - write_page(bitmap, lastpage, 0); - } else { - set_page_attr(bitmap, lastpage, - BITMAP_PAGE_NEEDWRITE); - bitmap->allclean = 0; - spin_unlock_irqrestore(&bitmap->lock, flags); - } - } else - spin_unlock_irqrestore(&bitmap->lock, flags); - lastpage = page; - - /* We are possibly going to clear some bits, so make - * sure that events_cleared is up-to-date. 
- */ - if (bitmap->need_sync && - mddev->bitmap_info.external == 0) { - bitmap_super_t *sb; - bitmap->need_sync = 0; - sb = kmap_atomic(bitmap->sb_page); - sb->events_cleared = - cpu_to_le64(bitmap->events_cleared); - kunmap_atomic(sb); - write_page(bitmap, bitmap->sb_page, 1); - } - spin_lock_irqsave(&bitmap->lock, flags); - if (!bitmap->need_sync) - clear_page_attr(bitmap, page, BITMAP_PAGE_PENDING); - else - bitmap->allclean = 0; + counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0; } - bmc = bitmap_get_counter(bitmap, - (sector_t)j << bitmap->chunkshift, + bmc = bitmap_get_counter(counts, + block, &blocks, 0); - if (!bmc) + + if (!bmc) { j |= PAGE_COUNTER_MASK; - else if (*bmc) { - if (*bmc == 1 && !bitmap->need_sync) { - /* we can clear the bit */ - *bmc = 0; - bitmap_count_page(bitmap, - (sector_t)j << bitmap->chunkshift, - -1); - - /* clear the bit */ - paddr = kmap_atomic(page); - if (bitmap->flags & BITMAP_HOSTENDIAN) - clear_bit(file_page_offset(bitmap, j), - paddr); - else - __clear_bit_le( - file_page_offset(bitmap, - j), - paddr); - kunmap_atomic(paddr); - } else if (*bmc <= 2) { - *bmc = 1; /* maybe clear the bit next time */ - set_page_attr(bitmap, page, BITMAP_PAGE_PENDING); - bitmap->allclean = 0; - } + continue; } - } - spin_unlock_irqrestore(&bitmap->lock, flags); - - /* now sync the final page */ - if (lastpage != NULL) { - spin_lock_irqsave(&bitmap->lock, flags); - if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) { - clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); - spin_unlock_irqrestore(&bitmap->lock, flags); - write_page(bitmap, lastpage, 0); - } else { - set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); + if (*bmc == 1 && !bitmap->need_sync) { + /* We can clear the bit */ + *bmc = 0; + bitmap_count_page(counts, block, -1); + bitmap_file_clear_bit(bitmap, block); + } else if (*bmc && *bmc <= 2) { + *bmc = 1; + bitmap_set_pending(counts, block); bitmap->allclean = 0; - spin_unlock_irqrestore(&bitmap->lock, flags); + } + } + spin_unlock_irq(&counts->lock); + + /* Now start writeout on any page in NEEDWRITE that isn't DIRTY. + * DIRTY pages need to be written by bitmap_unplug so it can wait + * for them. + * If we find any DIRTY page we stop there and let bitmap_unplug + * handle all the rest. This is important in the case where + * the first blocking holds the superblock and it has been updated. + * We mustn't write any other blocks before the superblock. 
+ */ + for (j = 0; + j < bitmap->storage.file_pages + && !test_bit(BITMAP_STALE, &bitmap->flags); + j++) { + + if (test_page_attr(bitmap, j, + BITMAP_PAGE_DIRTY)) + /* bitmap_unplug will handle the rest */ + break; + if (test_and_clear_page_attr(bitmap, j, + BITMAP_PAGE_NEEDWRITE)) { + write_page(bitmap, bitmap->storage.filemap[j], 0); } } @@ -1240,7 +1227,7 @@ void bitmap_daemon_work(struct mddev *mddev) mutex_unlock(&mddev->bitmap_info.mutex); } -static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, +static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap, sector_t offset, sector_t *blocks, int create) __releases(bitmap->lock) @@ -1302,10 +1289,10 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect sector_t blocks; bitmap_counter_t *bmc; - spin_lock_irq(&bitmap->lock); - bmc = bitmap_get_counter(bitmap, offset, &blocks, 1); + spin_lock_irq(&bitmap->counts.lock); + bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 1); if (!bmc) { - spin_unlock_irq(&bitmap->lock); + spin_unlock_irq(&bitmap->counts.lock); return 0; } @@ -1317,7 +1304,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect */ prepare_to_wait(&bitmap->overflow_wait, &__wait, TASK_UNINTERRUPTIBLE); - spin_unlock_irq(&bitmap->lock); + spin_unlock_irq(&bitmap->counts.lock); io_schedule(); finish_wait(&bitmap->overflow_wait, &__wait); continue; @@ -1326,7 +1313,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect switch (*bmc) { case 0: bitmap_file_set_bit(bitmap, offset); - bitmap_count_page(bitmap, offset, 1); + bitmap_count_page(&bitmap->counts, offset, 1); /* fall through */ case 1: *bmc = 2; @@ -1334,7 +1321,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect (*bmc)++; - spin_unlock_irq(&bitmap->lock); + spin_unlock_irq(&bitmap->counts.lock); offset += blocks; if (sectors > blocks) @@ -1364,10 +1351,10 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto unsigned long flags; bitmap_counter_t *bmc; - spin_lock_irqsave(&bitmap->lock, flags); - bmc = bitmap_get_counter(bitmap, offset, &blocks, 0); + spin_lock_irqsave(&bitmap->counts.lock, flags); + bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 0); if (!bmc) { - spin_unlock_irqrestore(&bitmap->lock, flags); + spin_unlock_irqrestore(&bitmap->counts.lock, flags); return; } @@ -1386,14 +1373,10 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto (*bmc)--; if (*bmc <= 2) { - set_page_attr(bitmap, - filemap_get_page( - bitmap, - offset >> bitmap->chunkshift), - BITMAP_PAGE_PENDING); + bitmap_set_pending(&bitmap->counts, offset); bitmap->allclean = 0; } - spin_unlock_irqrestore(&bitmap->lock, flags); + spin_unlock_irqrestore(&bitmap->counts.lock, flags); offset += blocks; if (sectors > blocks) sectors -= blocks; @@ -1412,8 +1395,8 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks = 1024; return 1; /* always resync if no bitmap */ } - spin_lock_irq(&bitmap->lock); - bmc = bitmap_get_counter(bitmap, offset, blocks, 0); + spin_lock_irq(&bitmap->counts.lock); + bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0); rv = 0; if (bmc) { /* locked */ @@ -1427,7 +1410,7 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t } } } - spin_unlock_irq(&bitmap->lock); + spin_unlock_irq(&bitmap->counts.lock); return rv; } @@ -1464,8 +1447,8 @@ void bitmap_end_sync(struct bitmap 
*bitmap, sector_t offset, sector_t *blocks, i *blocks = 1024; return; } - spin_lock_irqsave(&bitmap->lock, flags); - bmc = bitmap_get_counter(bitmap, offset, blocks, 0); + spin_lock_irqsave(&bitmap->counts.lock, flags); + bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0); if (bmc == NULL) goto unlock; /* locked */ @@ -1476,15 +1459,13 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, i *bmc |= NEEDED_MASK; else { if (*bmc <= 2) { - set_page_attr(bitmap, - filemap_get_page(bitmap, offset >> bitmap->chunkshift), - BITMAP_PAGE_PENDING); + bitmap_set_pending(&bitmap->counts, offset); bitmap->allclean = 0; } } } unlock: - spin_unlock_irqrestore(&bitmap->lock, flags); + spin_unlock_irqrestore(&bitmap->counts.lock, flags); } EXPORT_SYMBOL(bitmap_end_sync); @@ -1524,7 +1505,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector) bitmap->mddev->curr_resync_completed = sector; set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags); - sector &= ~((1ULL << bitmap->chunkshift) - 1); + sector &= ~((1ULL << bitmap->counts.chunkshift) - 1); s = 0; while (s < sector && s < bitmap->mddev->resync_max_sectors) { bitmap_end_sync(bitmap, s, &blocks, 0); @@ -1538,27 +1519,25 @@ EXPORT_SYMBOL(bitmap_cond_end_sync); static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed) { /* For each chunk covered by any of these sectors, set the - * counter to 1 and set resync_needed. They should all + * counter to 2 and possibly set resync_needed. They should all * be 0 at this point */ sector_t secs; bitmap_counter_t *bmc; - spin_lock_irq(&bitmap->lock); - bmc = bitmap_get_counter(bitmap, offset, &secs, 1); + spin_lock_irq(&bitmap->counts.lock); + bmc = bitmap_get_counter(&bitmap->counts, offset, &secs, 1); if (!bmc) { - spin_unlock_irq(&bitmap->lock); + spin_unlock_irq(&bitmap->counts.lock); return; } if (!*bmc) { - struct page *page; *bmc = 2 | (needed ? NEEDED_MASK : 0); - bitmap_count_page(bitmap, offset, 1); - page = filemap_get_page(bitmap, offset >> bitmap->chunkshift); - set_page_attr(bitmap, page, BITMAP_PAGE_PENDING); + bitmap_count_page(&bitmap->counts, offset, 1); + bitmap_set_pending(&bitmap->counts, offset); bitmap->allclean = 0; } - spin_unlock_irq(&bitmap->lock); + spin_unlock_irq(&bitmap->counts.lock); } /* dirty the memory and file bits for bitmap chunks "s" to "e" */ @@ -1567,11 +1546,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e) unsigned long chunk; for (chunk = s; chunk <= e; chunk++) { - sector_t sec = (sector_t)chunk << bitmap->chunkshift; + sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift; bitmap_set_memory_bits(bitmap, sec, 1); - spin_lock_irq(&bitmap->lock); bitmap_file_set_bit(bitmap, sec); - spin_unlock_irq(&bitmap->lock); if (sec < bitmap->mddev->recovery_cp) /* We are asserting that the array is dirty, * so move the recovery_cp address back so @@ -1616,11 +1593,15 @@ static void bitmap_free(struct bitmap *bitmap) if (!bitmap) /* there was no bitmap */ return; - /* release the bitmap file and kill the daemon */ - bitmap_file_put(bitmap); + /* Shouldn't be needed - but just in case.... 
*/ + wait_event(bitmap->write_wait, + atomic_read(&bitmap->pending_writes) == 0); + + /* release the bitmap file */ + bitmap_file_unmap(&bitmap->storage); - bp = bitmap->bp; - pages = bitmap->pages; + bp = bitmap->counts.bp; + pages = bitmap->counts.pages; /* free all allocated memory */ @@ -1659,25 +1640,19 @@ int bitmap_create(struct mddev *mddev) { struct bitmap *bitmap; sector_t blocks = mddev->resync_max_sectors; - unsigned long chunks; - unsigned long pages; struct file *file = mddev->bitmap_info.file; int err; struct sysfs_dirent *bm = NULL; BUILD_BUG_ON(sizeof(bitmap_super_t) != 256); - if (!file - && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */ - return 0; - BUG_ON(file && mddev->bitmap_info.offset); bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); if (!bitmap) return -ENOMEM; - spin_lock_init(&bitmap->lock); + spin_lock_init(&bitmap->counts.lock); atomic_set(&bitmap->pending_writes, 0); init_waitqueue_head(&bitmap->write_wait); init_waitqueue_head(&bitmap->overflow_wait); @@ -1693,7 +1668,7 @@ int bitmap_create(struct mddev *mddev) } else bitmap->sysfs_can_clear = NULL; - bitmap->file = file; + bitmap->storage.file = file; if (file) { get_file(file); /* As future accesses to this file will use bmap, @@ -1724,32 +1699,15 @@ int bitmap_create(struct mddev *mddev) goto error; bitmap->daemon_lastrun = jiffies; - bitmap->chunkshift = (ffz(~mddev->bitmap_info.chunksize) - - BITMAP_BLOCK_SHIFT); - - chunks = (blocks + (1 << bitmap->chunkshift) - 1) >> - bitmap->chunkshift; - pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO; - - BUG_ON(!pages); - - bitmap->chunks = chunks; - bitmap->pages = pages; - bitmap->missing_pages = pages; - - bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL); - - err = -ENOMEM; - if (!bitmap->bp) + err = bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1); + if (err) goto error; printk(KERN_INFO "created bitmap (%lu pages) for device %s\n", - pages, bmname(bitmap)); + bitmap->counts.pages, bmname(bitmap)); mddev->bitmap = bitmap; - - - return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0; + return test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0; error: bitmap_free(bitmap); @@ -1790,13 +1748,17 @@ int bitmap_load(struct mddev *mddev) if (err) goto out; + clear_bit(BITMAP_STALE, &bitmap->flags); + + /* Kick recovery in case any bits were set */ + set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery); mddev->thread->timeout = mddev->bitmap_info.daemon_sleep; md_wakeup_thread(mddev->thread); bitmap_update_sb(bitmap); - if (bitmap->flags & BITMAP_WRITE_ERROR) + if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) err = -EIO; out: return err; @@ -1806,30 +1768,194 @@ EXPORT_SYMBOL_GPL(bitmap_load); void bitmap_status(struct seq_file *seq, struct bitmap *bitmap) { unsigned long chunk_kb; - unsigned long flags; + struct bitmap_counts *counts; if (!bitmap) return; - spin_lock_irqsave(&bitmap->lock, flags); + counts = &bitmap->counts; + chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10; seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " "%lu%s chunk", - bitmap->pages - bitmap->missing_pages, - bitmap->pages, - (bitmap->pages - bitmap->missing_pages) + counts->pages - counts->missing_pages, + counts->pages, + (counts->pages - counts->missing_pages) << (PAGE_SHIFT - 10), chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize, chunk_kb ? 
"KB" : "B"); - if (bitmap->file) { + if (bitmap->storage.file) { seq_printf(seq, ", file: "); - seq_path(seq, &bitmap->file->f_path, " \t\n"); + seq_path(seq, &bitmap->storage.file->f_path, " \t\n"); } seq_printf(seq, "\n"); - spin_unlock_irqrestore(&bitmap->lock, flags); } +int bitmap_resize(struct bitmap *bitmap, sector_t blocks, + int chunksize, int init) +{ + /* If chunk_size is 0, choose an appropriate chunk size. + * Then possibly allocate new storage space. + * Then quiesce, copy bits, replace bitmap, and re-start + * + * This function is called both to set up the initial bitmap + * and to resize the bitmap while the array is active. + * If this happens as a result of the array being resized, + * chunksize will be zero, and we need to choose a suitable + * chunksize, otherwise we use what we are given. + */ + struct bitmap_storage store; + struct bitmap_counts old_counts; + unsigned long chunks; + sector_t block; + sector_t old_blocks, new_blocks; + int chunkshift; + int ret = 0; + long pages; + struct bitmap_page *new_bp; + + if (chunksize == 0) { + /* If there is enough space, leave the chunk size unchanged, + * else increase by factor of two until there is enough space. + */ + long bytes; + long space = bitmap->mddev->bitmap_info.space; + + if (space == 0) { + /* We don't know how much space there is, so limit + * to current size - in sectors. + */ + bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8); + if (!bitmap->mddev->bitmap_info.external) + bytes += sizeof(bitmap_super_t); + space = DIV_ROUND_UP(bytes, 512); + bitmap->mddev->bitmap_info.space = space; + } + chunkshift = bitmap->counts.chunkshift; + chunkshift--; + do { + /* 'chunkshift' is shift from block size to chunk size */ + chunkshift++; + chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift); + bytes = DIV_ROUND_UP(chunks, 8); + if (!bitmap->mddev->bitmap_info.external) + bytes += sizeof(bitmap_super_t); + } while (bytes > (space << 9)); + } else + chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT; + + chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift); + memset(&store, 0, sizeof(store)); + if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) + ret = bitmap_storage_alloc(&store, chunks, + !bitmap->mddev->bitmap_info.external); + if (ret) + goto err; + + pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO); + + new_bp = kzalloc(pages * sizeof(*new_bp), GFP_KERNEL); + ret = -ENOMEM; + if (!new_bp) { + bitmap_file_unmap(&store); + goto err; + } + + if (!init) + bitmap->mddev->pers->quiesce(bitmap->mddev, 1); + + store.file = bitmap->storage.file; + bitmap->storage.file = NULL; + + if (store.sb_page && bitmap->storage.sb_page) + memcpy(page_address(store.sb_page), + page_address(bitmap->storage.sb_page), + sizeof(bitmap_super_t)); + bitmap_file_unmap(&bitmap->storage); + bitmap->storage = store; + + old_counts = bitmap->counts; + bitmap->counts.bp = new_bp; + bitmap->counts.pages = pages; + bitmap->counts.missing_pages = pages; + bitmap->counts.chunkshift = chunkshift; + bitmap->counts.chunks = chunks; + bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift + + BITMAP_BLOCK_SHIFT); + + blocks = min(old_counts.chunks << old_counts.chunkshift, + chunks << chunkshift); + + spin_lock_irq(&bitmap->counts.lock); + for (block = 0; block < blocks; ) { + bitmap_counter_t *bmc_old, *bmc_new; + int set; + + bmc_old = bitmap_get_counter(&old_counts, block, + &old_blocks, 0); + set = bmc_old && NEEDED(*bmc_old); + + if (set) { + bmc_new = bitmap_get_counter(&bitmap->counts, block, + &new_blocks, 1); + if 
(*bmc_new == 0) { + /* need to set on-disk bits too. */ + sector_t end = block + new_blocks; + sector_t start = block >> chunkshift; + start <<= chunkshift; + while (start < end) { + bitmap_file_set_bit(bitmap, block); + start += 1 << chunkshift; + } + *bmc_new = 2; + bitmap_count_page(&bitmap->counts, + block, 1); + bitmap_set_pending(&bitmap->counts, + block); + } + *bmc_new |= NEEDED_MASK; + if (new_blocks < old_blocks) + old_blocks = new_blocks; + } + block += old_blocks; + } + + if (!init) { + int i; + while (block < (chunks << chunkshift)) { + bitmap_counter_t *bmc; + bmc = bitmap_get_counter(&bitmap->counts, block, + &new_blocks, 1); + if (bmc) { + /* new space. It needs to be resynced, so + * we set NEEDED_MASK. + */ + if (*bmc == 0) { + *bmc = NEEDED_MASK | 2; + bitmap_count_page(&bitmap->counts, + block, 1); + bitmap_set_pending(&bitmap->counts, + block); + } + } + block += new_blocks; + } + for (i = 0; i < bitmap->storage.file_pages; i++) + set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY); + } + spin_unlock_irq(&bitmap->counts.lock); + + if (!init) { + bitmap_unplug(bitmap); + bitmap->mddev->pers->quiesce(bitmap->mddev, 0); + } + ret = 0; +err: + return ret; +} +EXPORT_SYMBOL_GPL(bitmap_resize); + static ssize_t location_show(struct mddev *mddev, char *page) { @@ -1923,6 +2049,43 @@ location_store(struct mddev *mddev, const char *buf, size_t len) static struct md_sysfs_entry bitmap_location = __ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store); +/* 'bitmap/space' is the space available at 'location' for the + * bitmap. This allows the kernel to know when it is safe to + * resize the bitmap to match a resized array. + */ +static ssize_t +space_show(struct mddev *mddev, char *page) +{ + return sprintf(page, "%lu\n", mddev->bitmap_info.space); +} + +static ssize_t +space_store(struct mddev *mddev, const char *buf, size_t len) +{ + unsigned long sectors; + int rv; + + rv = kstrtoul(buf, 10, §ors); + if (rv) + return rv; + + if (sectors == 0) + return -EINVAL; + + if (mddev->bitmap && + sectors < (mddev->bitmap->storage.bytes + 511) >> 9) + return -EFBIG; /* Bitmap is too big for this small space */ + + /* could make sure it isn't too big, but that isn't really + * needed - user-space should be careful. 
+ */ + mddev->bitmap_info.space = sectors; + return len; +} + +static struct md_sysfs_entry bitmap_space = +__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store); + static ssize_t timeout_show(struct mddev *mddev, char *page) { @@ -2098,6 +2261,7 @@ __ATTR(max_backlog_used, S_IRUGO | S_IWUSR, static struct attribute *md_bitmap_attrs[] = { &bitmap_location.attr, + &bitmap_space.attr, &bitmap_timeout.attr, &bitmap_backlog.attr, &bitmap_chunksize.attr, diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h index b44b0aba2d47..df4aeb6ac6f0 100644 --- a/drivers/md/bitmap.h +++ b/drivers/md/bitmap.h @@ -111,9 +111,9 @@ typedef __u16 bitmap_counter_t; /* use these for bitmap->flags and bitmap->sb->state bit-fields */ enum bitmap_state { - BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */ - BITMAP_WRITE_ERROR = 0x004, /* A write error has occurred */ - BITMAP_HOSTENDIAN = 0x8000, + BITMAP_STALE = 1, /* the bitmap file is out of date or had -EIO */ + BITMAP_WRITE_ERROR = 2, /* A write error has occurred */ + BITMAP_HOSTENDIAN =15, }; /* the superblock at the front of the bitmap file -- little endian */ @@ -128,8 +128,10 @@ typedef struct bitmap_super_s { __le32 chunksize; /* 52 the bitmap chunk size in bytes */ __le32 daemon_sleep; /* 56 seconds between disk flushes */ __le32 write_behind; /* 60 number of outstanding write-behind writes */ + __le32 sectors_reserved; /* 64 number of 512-byte sectors that are + * reserved for the bitmap. */ - __u8 pad[256 - 64]; /* set to zero */ + __u8 pad[256 - 68]; /* set to zero */ } bitmap_super_t; /* notes: @@ -160,35 +162,48 @@ struct bitmap_page { */ unsigned int hijacked:1; /* + * If any counter in this page is '1' or '2' - and so could be + * cleared then that page is marked as 'pending' + */ + unsigned int pending:1; + /* * count of dirty bits on the page */ - unsigned int count:31; + unsigned int count:30; }; /* the main bitmap structure - one per mddev */ struct bitmap { - struct bitmap_page *bp; - unsigned long pages; /* total number of pages in the bitmap */ - unsigned long missing_pages; /* number of pages not yet allocated */ - struct mddev *mddev; /* the md device that the bitmap is for */ + struct bitmap_counts { + spinlock_t lock; + struct bitmap_page *bp; + unsigned long pages; /* total number of pages + * in the bitmap */ + unsigned long missing_pages; /* number of pages + * not yet allocated */ + unsigned long chunkshift; /* chunksize = 2^chunkshift + * (for bitops) */ + unsigned long chunks; /* Total number of data + * chunks for the array */ + } counts; - /* bitmap chunksize -- how much data does each bit represent? 
*/ - unsigned long chunkshift; /* chunksize = 2^(chunkshift+9) (for bitops) */ - unsigned long chunks; /* total number of data chunks for the array */ + struct mddev *mddev; /* the md device that the bitmap is for */ __u64 events_cleared; int need_sync; - /* bitmap spinlock */ - spinlock_t lock; - - struct file *file; /* backing disk file */ - struct page *sb_page; /* cached copy of the bitmap file superblock */ - struct page **filemap; /* list of cache pages for the file */ - unsigned long *filemap_attr; /* attributes associated w/ filemap pages */ - unsigned long file_pages; /* number of pages in the file */ - int last_page_size; /* bytes in the last page */ + struct bitmap_storage { + struct file *file; /* backing disk file */ + struct page *sb_page; /* cached copy of the bitmap + * file superblock */ + struct page **filemap; /* list of cache pages for + * the file */ + unsigned long *filemap_attr; /* attributes associated + * w/ filemap pages */ + unsigned long file_pages; /* number of pages in the file*/ + unsigned long bytes; /* total bytes in the bitmap */ + } storage; unsigned long flags; @@ -242,6 +257,9 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector); void bitmap_unplug(struct bitmap *bitmap); void bitmap_daemon_work(struct mddev *mddev); + +int bitmap_resize(struct bitmap *bitmap, sector_t blocks, + int chunksize, int init); #endif #endif diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 754f38f8a692..638dae048b4f 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -18,6 +18,7 @@ #include <linux/slab.h> #include <linux/time.h> #include <linux/workqueue.h> +#include <linux/delay.h> #include <scsi/scsi_dh.h> #include <linux/atomic.h> @@ -61,11 +62,11 @@ struct multipath { struct list_head list; struct dm_target *ti; - spinlock_t lock; - const char *hw_handler_name; char *hw_handler_params; + spinlock_t lock; + unsigned nr_priority_groups; struct list_head priority_groups; @@ -81,16 +82,17 @@ struct multipath { struct priority_group *next_pg; /* Switch to this PG if set */ unsigned repeat_count; /* I/Os left before calling PS again */ - unsigned queue_io; /* Must we queue all I/O? */ - unsigned queue_if_no_path; /* Queue I/O if last path fails? */ - unsigned saved_queue_if_no_path;/* Saved state during suspension */ + unsigned queue_io:1; /* Must we queue all I/O? */ + unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */ + unsigned saved_queue_if_no_path:1; /* Saved state during suspension */ + unsigned pg_init_retries; /* Number of times to retry pg_init */ unsigned pg_init_count; /* Number of times pg_init called */ unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ + unsigned queue_size; struct work_struct process_queued_ios; struct list_head queued_ios; - unsigned queue_size; struct work_struct trigger_event; @@ -328,14 +330,18 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes) /* * Loop through priority groups until we find a valid path. * First time we skip PGs marked 'bypassed'. - * Second time we only try the ones we skipped. + * Second time we only try the ones we skipped, but set + * pg_init_delay_retry so we do not hammer controllers. 
*/ do { list_for_each_entry(pg, &m->priority_groups, list) { if (pg->bypassed == bypassed) continue; - if (!__choose_path_in_pg(m, pg, nr_bytes)) + if (!__choose_path_in_pg(m, pg, nr_bytes)) { + if (!bypassed) + m->pg_init_delay_retry = 1; return; + } } } while (bypassed--); @@ -481,9 +487,6 @@ static void process_queued_ios(struct work_struct *work) spin_lock_irqsave(&m->lock, flags); - if (!m->queue_size) - goto out; - if (!m->current_pgpath) __choose_pgpath(m, 0); @@ -496,7 +499,6 @@ static void process_queued_ios(struct work_struct *work) if (m->pg_init_required && !m->pg_init_in_progress && pgpath) __pg_init_all_paths(m); -out: spin_unlock_irqrestore(&m->lock, flags); if (!must_queue) dispatch_queued_ios(m); @@ -1517,11 +1519,16 @@ out: static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg) { - struct multipath *m = (struct multipath *) ti->private; - struct block_device *bdev = NULL; - fmode_t mode = 0; + struct multipath *m = ti->private; + struct block_device *bdev; + fmode_t mode; unsigned long flags; - int r = 0; + int r; + +again: + bdev = NULL; + mode = 0; + r = 0; spin_lock_irqsave(&m->lock, flags); @@ -1546,6 +1553,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) r = scsi_verify_blk_ioctl(NULL, cmd); + if (r == -EAGAIN && !fatal_signal_pending(current)) { + queue_work(kmultipathd, &m->process_queued_ios); + msleep(10); + goto again; + } + return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); } @@ -1643,7 +1656,7 @@ out: *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", - .version = {1, 3, 0}, + .version = {1, 4, 0}, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 68965e663248..017c34d78d61 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -155,10 +155,7 @@ static void context_free(struct raid_set *rs) for (i = 0; i < rs->md.raid_disks; i++) { if (rs->dev[i].meta_dev) dm_put_device(rs->ti, rs->dev[i].meta_dev); - if (rs->dev[i].rdev.sb_page) - put_page(rs->dev[i].rdev.sb_page); - rs->dev[i].rdev.sb_page = NULL; - rs->dev[i].rdev.sb_loaded = 0; + md_rdev_clear(&rs->dev[i].rdev); if (rs->dev[i].data_dev) dm_put_device(rs->ti, rs->dev[i].data_dev); } @@ -606,7 +603,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size) if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) { DMERR("Failed to read superblock of device at position %d", rdev->raid_disk); - set_bit(Faulty, &rdev->flags); + md_error(rdev->mddev, rdev); return -EINVAL; } @@ -617,16 +614,18 @@ static int read_disk_sb(struct md_rdev *rdev, int size) static void super_sync(struct mddev *mddev, struct md_rdev *rdev) { - struct md_rdev *r; + int i; uint64_t failed_devices; struct dm_raid_superblock *sb; + struct raid_set *rs = container_of(mddev, struct raid_set, md); sb = page_address(rdev->sb_page); failed_devices = le64_to_cpu(sb->failed_devices); - rdev_for_each(r, mddev) - if ((r->raid_disk >= 0) && test_bit(Faulty, &r->flags)) - failed_devices |= (1ULL << r->raid_disk); + for (i = 0; i < mddev->raid_disks; i++) + if (!rs->dev[i].data_dev || + test_bit(Faulty, &(rs->dev[i].rdev.flags))) + failed_devices |= (1ULL << i); memset(sb, 0, sizeof(*sb)); @@ -1252,12 +1251,13 @@ static void raid_resume(struct dm_target *ti) { struct raid_set *rs = ti->private; + set_bit(MD_CHANGE_DEVS, &rs->md.flags); if 
(!rs->bitmap_loaded) { bitmap_load(&rs->md); rs->bitmap_loaded = 1; - } else - md_wakeup_thread(rs->md.thread); + } + clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); mddev_resume(&rs->md); } diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index d039de8322f0..b58b7a33914a 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -1084,6 +1084,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->split_io = dm_rh_get_region_size(ms->rh); ti->num_flush_requests = 1; ti->num_discard_requests = 1; + ti->discard_zeroes_data_unsupported = 1; ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0); @@ -1214,7 +1215,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, * We need to dec pending if this was a write. */ if (rw == WRITE) { - if (!(bio->bi_rw & REQ_FLUSH)) + if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) dm_rh_dec(ms->rh, map_context->ll); return error; } diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 7771ed212182..69732e03eb34 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c @@ -404,6 +404,9 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio) return; } + if (bio->bi_rw & REQ_DISCARD) + return; + /* We must inform the log that the sync count has changed. */ log->type->set_region_sync(log, region, 0); @@ -524,7 +527,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) struct bio *bio; for (bio = bios->head; bio; bio = bio->bi_next) { - if (bio->bi_rw & REQ_FLUSH) + if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)) continue; rh_inc(rh, dm_rh_bio_to_region(rh, bio)); } diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 737d38865b69..3e2907f0bc46 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1082,12 +1082,89 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd, return 0; } -static int __get_held_metadata_root(struct dm_pool_metadata *pmd, - dm_block_t *result) +static int __reserve_metadata_snap(struct dm_pool_metadata *pmd) +{ + int r, inc; + struct thin_disk_superblock *disk_super; + struct dm_block *copy, *sblock; + dm_block_t held_root; + + /* + * Copy the superblock. + */ + dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION); + r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION, + &sb_validator, &copy, &inc); + if (r) + return r; + + BUG_ON(!inc); + + held_root = dm_block_location(copy); + disk_super = dm_block_data(copy); + + if (le64_to_cpu(disk_super->held_root)) { + DMWARN("Pool metadata snapshot already exists: release this before taking another."); + + dm_tm_dec(pmd->tm, held_root); + dm_tm_unlock(pmd->tm, copy); + pmd->need_commit = 1; + + return -EBUSY; + } + + /* + * Wipe the spacemap since we're not publishing this. + */ + memset(&disk_super->data_space_map_root, 0, + sizeof(disk_super->data_space_map_root)); + memset(&disk_super->metadata_space_map_root, 0, + sizeof(disk_super->metadata_space_map_root)); + + /* + * Increment the data structures that need to be preserved. + */ + dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root)); + dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root)); + dm_tm_unlock(pmd->tm, copy); + + /* + * Write the held root into the superblock.
+ */ + r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, + &sb_validator, &sblock); + if (r) { + dm_tm_dec(pmd->tm, held_root); + pmd->need_commit = 1; + return r; + } + + disk_super = dm_block_data(sblock); + disk_super->held_root = cpu_to_le64(held_root); + dm_bm_unlock(sblock); + + pmd->need_commit = 1; + + return 0; +} + +int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd) +{ + int r; + + down_write(&pmd->root_lock); + r = __reserve_metadata_snap(pmd); + up_write(&pmd->root_lock); + + return r; +} + +static int __release_metadata_snap(struct dm_pool_metadata *pmd) { int r; struct thin_disk_superblock *disk_super; - struct dm_block *sblock; + struct dm_block *sblock, *copy; + dm_block_t held_root; r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, &sb_validator, &sblock); @@ -1095,18 +1172,65 @@ static int __get_held_metadata_root(struct dm_pool_metadata *pmd, return r; disk_super = dm_block_data(sblock); + held_root = le64_to_cpu(disk_super->held_root); + disk_super->held_root = cpu_to_le64(0); + pmd->need_commit = 1; + + dm_bm_unlock(sblock); + + if (!held_root) { + DMWARN("No pool metadata snapshot found: nothing to release."); + return -EINVAL; + } + + r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy); + if (r) + return r; + + disk_super = dm_block_data(copy); + dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root)); + dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root)); + dm_sm_dec_block(pmd->metadata_sm, held_root); + + return dm_tm_unlock(pmd->tm, copy); +} + +int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd) +{ + int r; + + down_write(&pmd->root_lock); + r = __release_metadata_snap(pmd); + up_write(&pmd->root_lock); + + return r; +} + +static int __get_metadata_snap(struct dm_pool_metadata *pmd, + dm_block_t *result) +{ + int r; + struct thin_disk_superblock *disk_super; + struct dm_block *sblock; + + r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, + &sb_validator, &sblock); + if (r) + return r; + + disk_super = dm_block_data(sblock); *result = le64_to_cpu(disk_super->held_root); return dm_bm_unlock(sblock); } -int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd, - dm_block_t *result) +int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd, + dm_block_t *result) { int r; down_read(&pmd->root_lock); - r = __get_held_metadata_root(pmd, result); + r = __get_metadata_snap(pmd, result); up_read(&pmd->root_lock); return r; diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index ed4725e67c96..b88918ccdaf6 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -90,11 +90,18 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd, /* * Hold/get root for userspace transaction. + * + * The metadata snapshot is a copy of the current superblock (minus the + * space maps). Userland can access the data structures for READ + * operations only. A small performance hit is incurred by providing this + * copy of the metadata to userland due to extra copy-on-write operations + * on the metadata nodes. Release this as soon as you finish with it.
*/ -int dm_pool_hold_metadata_root(struct dm_pool_metadata *pmd); +int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd); +int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd); -int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd, - dm_block_t *result); +int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd, + dm_block_t *result); /* * Actions on a single virtual device. diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index eb3d138ff55a..68694da0d21d 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -111,7 +111,7 @@ struct cell_key { dm_block_t block; }; -struct cell { +struct dm_bio_prison_cell { struct hlist_node list; struct bio_prison *prison; struct cell_key key; @@ -141,6 +141,8 @@ static uint32_t calc_nr_buckets(unsigned nr_cells) return n; } +static struct kmem_cache *_cell_cache; + /* * @nr_cells should be the number of cells you want in use _concurrently_. * Don't confuse it with the number of distinct keys. @@ -157,8 +159,7 @@ static struct bio_prison *prison_create(unsigned nr_cells) return NULL; spin_lock_init(&prison->lock); - prison->cell_pool = mempool_create_kmalloc_pool(nr_cells, - sizeof(struct cell)); + prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache); if (!prison->cell_pool) { kfree(prison); return NULL; @@ -194,10 +195,10 @@ static int keys_equal(struct cell_key *lhs, struct cell_key *rhs) (lhs->block == rhs->block); } -static struct cell *__search_bucket(struct hlist_head *bucket, - struct cell_key *key) +static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket, + struct cell_key *key) { - struct cell *cell; + struct dm_bio_prison_cell *cell; struct hlist_node *tmp; hlist_for_each_entry(cell, tmp, bucket, list) @@ -214,12 +215,12 @@ static struct cell *__search_bucket(struct hlist_head *bucket, * Returns 1 if the cell was already held, 0 if @inmate is the new holder. */ static int bio_detain(struct bio_prison *prison, struct cell_key *key, - struct bio *inmate, struct cell **ref) + struct bio *inmate, struct dm_bio_prison_cell **ref) { int r = 1; unsigned long flags; uint32_t hash = hash_key(prison, key); - struct cell *cell, *cell2; + struct dm_bio_prison_cell *cell, *cell2; BUG_ON(hash > prison->nr_buckets); @@ -273,7 +274,7 @@ out: /* * @inmates must have been initialised prior to this call */ -static void __cell_release(struct cell *cell, struct bio_list *inmates) +static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates) { struct bio_prison *prison = cell->prison; @@ -287,7 +288,7 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates) mempool_free(cell, prison->cell_pool); } -static void cell_release(struct cell *cell, struct bio_list *bios) +static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios) { unsigned long flags; struct bio_prison *prison = cell->prison; @@ -303,7 +304,7 @@ static void cell_release(struct cell *cell, struct bio_list *bios) * bio may be in the cell. This function releases the cell, and also does * a sanity check. 
*/ -static void __cell_release_singleton(struct cell *cell, struct bio *bio) +static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio) { BUG_ON(cell->holder != bio); BUG_ON(!bio_list_empty(&cell->bios)); @@ -311,7 +312,7 @@ static void __cell_release_singleton(struct cell *cell, struct bio *bio) __cell_release(cell, NULL); } -static void cell_release_singleton(struct cell *cell, struct bio *bio) +static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio) { unsigned long flags; struct bio_prison *prison = cell->prison; @@ -324,7 +325,8 @@ static void cell_release_singleton(struct cell *cell, struct bio *bio) /* * Sometimes we don't want the holder, just the additional bios. */ -static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates) +static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, + struct bio_list *inmates) { struct bio_prison *prison = cell->prison; @@ -334,7 +336,8 @@ static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates mempool_free(cell, prison->cell_pool); } -static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates) +static void cell_release_no_holder(struct dm_bio_prison_cell *cell, + struct bio_list *inmates) { unsigned long flags; struct bio_prison *prison = cell->prison; @@ -344,7 +347,7 @@ static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates) spin_unlock_irqrestore(&prison->lock, flags); } -static void cell_error(struct cell *cell) +static void cell_error(struct dm_bio_prison_cell *cell) { struct bio_prison *prison = cell->prison; struct bio_list bios; @@ -491,7 +494,7 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b, * also provides the interface for creating and destroying internal * devices. */ -struct new_mapping; +struct dm_thin_new_mapping; struct pool_features { unsigned zero_new_blocks:1; @@ -537,7 +540,7 @@ struct pool { struct deferred_set shared_read_ds; struct deferred_set all_io_ds; - struct new_mapping *next_mapping; + struct dm_thin_new_mapping *next_mapping; mempool_t *mapping_pool; mempool_t *endio_hook_pool; }; @@ -630,11 +633,11 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev /*----------------------------------------------------------------*/ -struct endio_hook { +struct dm_thin_endio_hook { struct thin_c *tc; struct deferred_entry *shared_read_entry; struct deferred_entry *all_io_entry; - struct new_mapping *overwrite_mapping; + struct dm_thin_new_mapping *overwrite_mapping; }; static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) @@ -647,7 +650,8 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) bio_list_init(master); while ((bio = bio_list_pop(&bios))) { - struct endio_hook *h = dm_get_mapinfo(bio)->ptr; + struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; + if (h->tc == tc) bio_endio(bio, DM_ENDIO_REQUEUE); else @@ -736,7 +740,7 @@ static void wake_worker(struct pool *pool) /* * Bio endio functions. 
*/ -struct new_mapping { +struct dm_thin_new_mapping { struct list_head list; unsigned quiesced:1; @@ -746,7 +750,7 @@ struct new_mapping { struct thin_c *tc; dm_block_t virt_block; dm_block_t data_block; - struct cell *cell, *cell2; + struct dm_bio_prison_cell *cell, *cell2; int err; /* @@ -759,7 +763,7 @@ struct new_mapping { bio_end_io_t *saved_bi_end_io; }; -static void __maybe_add_mapping(struct new_mapping *m) +static void __maybe_add_mapping(struct dm_thin_new_mapping *m) { struct pool *pool = m->tc->pool; @@ -772,7 +776,7 @@ static void __maybe_add_mapping(struct new_mapping *m) static void copy_complete(int read_err, unsigned long write_err, void *context) { unsigned long flags; - struct new_mapping *m = context; + struct dm_thin_new_mapping *m = context; struct pool *pool = m->tc->pool; m->err = read_err || write_err ? -EIO : 0; @@ -786,8 +790,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context) static void overwrite_endio(struct bio *bio, int err) { unsigned long flags; - struct endio_hook *h = dm_get_mapinfo(bio)->ptr; - struct new_mapping *m = h->overwrite_mapping; + struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; + struct dm_thin_new_mapping *m = h->overwrite_mapping; struct pool *pool = m->tc->pool; m->err = err; @@ -811,7 +815,7 @@ static void overwrite_endio(struct bio *bio, int err) /* * This sends the bios in the cell back to the deferred_bios list. */ -static void cell_defer(struct thin_c *tc, struct cell *cell, +static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell, dm_block_t data_block) { struct pool *pool = tc->pool; @@ -828,7 +832,7 @@ static void cell_defer(struct thin_c *tc, struct cell *cell, * Same as cell_defer above, except it omits one particular detainee, * a write bio that covers the block and has already been processed. */ -static void cell_defer_except(struct thin_c *tc, struct cell *cell) +static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell) { struct bio_list bios; struct pool *pool = tc->pool; @@ -843,7 +847,7 @@ static void cell_defer_except(struct thin_c *tc, struct cell *cell) wake_worker(pool); } -static void process_prepared_mapping(struct new_mapping *m) +static void process_prepared_mapping(struct dm_thin_new_mapping *m) { struct thin_c *tc = m->tc; struct bio *bio; @@ -886,7 +890,7 @@ static void process_prepared_mapping(struct new_mapping *m) mempool_free(m, tc->pool->mapping_pool); } -static void process_prepared_discard(struct new_mapping *m) +static void process_prepared_discard(struct dm_thin_new_mapping *m) { int r; struct thin_c *tc = m->tc; @@ -909,11 +913,11 @@ static void process_prepared_discard(struct new_mapping *m) } static void process_prepared(struct pool *pool, struct list_head *head, - void (*fn)(struct new_mapping *)) + void (*fn)(struct dm_thin_new_mapping *)) { unsigned long flags; struct list_head maps; - struct new_mapping *m, *tmp; + struct dm_thin_new_mapping *m, *tmp; INIT_LIST_HEAD(&maps); spin_lock_irqsave(&pool->lock, flags); @@ -957,9 +961,9 @@ static int ensure_next_mapping(struct pool *pool) return pool->next_mapping ? 
0 : -ENOMEM; } -static struct new_mapping *get_next_mapping(struct pool *pool) +static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool) { - struct new_mapping *r = pool->next_mapping; + struct dm_thin_new_mapping *r = pool->next_mapping; BUG_ON(!pool->next_mapping); @@ -971,11 +975,11 @@ static struct new_mapping *get_next_mapping(struct pool *pool) static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, struct dm_dev *origin, dm_block_t data_origin, dm_block_t data_dest, - struct cell *cell, struct bio *bio) + struct dm_bio_prison_cell *cell, struct bio *bio) { int r; struct pool *pool = tc->pool; - struct new_mapping *m = get_next_mapping(pool); + struct dm_thin_new_mapping *m = get_next_mapping(pool); INIT_LIST_HEAD(&m->list); m->quiesced = 0; @@ -997,7 +1001,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, * bio immediately. Otherwise we use kcopyd to clone the data first. */ if (io_overwrites_block(pool, bio)) { - struct endio_hook *h = dm_get_mapinfo(bio)->ptr; + struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; + h->overwrite_mapping = m; m->bio = bio; save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); @@ -1025,7 +1030,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block, dm_block_t data_origin, dm_block_t data_dest, - struct cell *cell, struct bio *bio) + struct dm_bio_prison_cell *cell, struct bio *bio) { schedule_copy(tc, virt_block, tc->pool_dev, data_origin, data_dest, cell, bio); @@ -1033,18 +1038,18 @@ static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block, static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, dm_block_t data_dest, - struct cell *cell, struct bio *bio) + struct dm_bio_prison_cell *cell, struct bio *bio) { schedule_copy(tc, virt_block, tc->origin_dev, virt_block, data_dest, cell, bio); } static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, - dm_block_t data_block, struct cell *cell, + dm_block_t data_block, struct dm_bio_prison_cell *cell, struct bio *bio) { struct pool *pool = tc->pool; - struct new_mapping *m = get_next_mapping(pool); + struct dm_thin_new_mapping *m = get_next_mapping(pool); INIT_LIST_HEAD(&m->list); m->quiesced = 1; @@ -1065,12 +1070,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, process_prepared_mapping(m); else if (io_overwrites_block(pool, bio)) { - struct endio_hook *h = dm_get_mapinfo(bio)->ptr; + struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; + h->overwrite_mapping = m; m->bio = bio; save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); remap_and_issue(tc, bio, data_block); - } else { int r; struct dm_io_region to; @@ -1155,7 +1160,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) */ static void retry_on_resume(struct bio *bio) { - struct endio_hook *h = dm_get_mapinfo(bio)->ptr; + struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; struct thin_c *tc = h->tc; struct pool *pool = tc->pool; unsigned long flags; @@ -1165,7 +1170,7 @@ static void retry_on_resume(struct bio *bio) spin_unlock_irqrestore(&pool->lock, flags); } -static void no_space(struct cell *cell) +static void no_space(struct dm_bio_prison_cell *cell) { struct bio *bio; struct bio_list bios; @@ -1182,11 +1187,11 @@ static void process_discard(struct thin_c *tc, struct bio *bio) int r; unsigned long flags; struct pool *pool = tc->pool; - struct cell *cell, *cell2; + struct 
dm_bio_prison_cell *cell, *cell2; struct cell_key key, key2; dm_block_t block = get_bio_block(tc, bio); struct dm_thin_lookup_result lookup_result; - struct new_mapping *m; + struct dm_thin_new_mapping *m; build_virtual_key(tc->td, block, &key); if (bio_detain(tc->pool->prison, &key, bio, &cell)) @@ -1240,7 +1245,10 @@ static void process_discard(struct thin_c *tc, struct bio *bio) cell_release_singleton(cell, bio); cell_release_singleton(cell2, bio); - remap_and_issue(tc, bio, lookup_result.block); + if ((!lookup_result.shared) && pool->pf.discard_passdown) + remap_and_issue(tc, bio, lookup_result.block); + else + bio_endio(bio, 0); } break; @@ -1263,7 +1271,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio) static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, struct cell_key *key, struct dm_thin_lookup_result *lookup_result, - struct cell *cell) + struct dm_bio_prison_cell *cell) { int r; dm_block_t data_block; @@ -1290,7 +1298,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio, dm_block_t block, struct dm_thin_lookup_result *lookup_result) { - struct cell *cell; + struct dm_bio_prison_cell *cell; struct pool *pool = tc->pool; struct cell_key key; @@ -1305,7 +1313,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio, if (bio_data_dir(bio) == WRITE) break_sharing(tc, bio, block, &key, lookup_result, cell); else { - struct endio_hook *h = dm_get_mapinfo(bio)->ptr; + struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; h->shared_read_entry = ds_inc(&pool->shared_read_ds); @@ -1315,7 +1323,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio, } static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block, - struct cell *cell) + struct dm_bio_prison_cell *cell) { int r; dm_block_t data_block; @@ -1363,7 +1371,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio) { int r; dm_block_t block = get_bio_block(tc, bio); - struct cell *cell; + struct dm_bio_prison_cell *cell; struct cell_key key; struct dm_thin_lookup_result lookup_result; @@ -1432,7 +1440,7 @@ static void process_deferred_bios(struct pool *pool) spin_unlock_irqrestore(&pool->lock, flags); while ((bio = bio_list_pop(&bios))) { - struct endio_hook *h = dm_get_mapinfo(bio)->ptr; + struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; struct thin_c *tc = h->tc; /* @@ -1522,10 +1530,10 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio) wake_worker(pool); } -static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio) +static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio) { struct pool *pool = tc->pool; - struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO); + struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO); h->tc = tc; h->shared_read_entry = NULL; @@ -1687,6 +1695,9 @@ static void __pool_destroy(struct pool *pool) kfree(pool); } +static struct kmem_cache *_new_mapping_cache; +static struct kmem_cache *_endio_hook_cache; + static struct pool *pool_create(struct mapped_device *pool_md, struct block_device *metadata_dev, unsigned long block_size, char **error) @@ -1755,16 +1766,16 @@ static struct pool *pool_create(struct mapped_device *pool_md, ds_init(&pool->all_io_ds); pool->next_mapping = NULL; - pool->mapping_pool = - mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping)); + pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE, + _new_mapping_cache); if 
(!pool->mapping_pool) { *error = "Error creating pool's mapping mempool"; err_p = ERR_PTR(-ENOMEM); goto bad_mapping_pool; } - pool->endio_hook_pool = - mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook)); + pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE, + _endio_hook_cache); if (!pool->endio_hook_pool) { *error = "Error creating pool's endio_hook mempool"; err_p = ERR_PTR(-ENOMEM); @@ -2276,6 +2287,43 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po return 0; } +static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool) +{ + int r; + + r = check_arg_count(argc, 1); + if (r) + return r; + + r = dm_pool_commit_metadata(pool->pmd); + if (r) { + DMERR("%s: dm_pool_commit_metadata() failed, error = %d", + __func__, r); + return r; + } + + r = dm_pool_reserve_metadata_snap(pool->pmd); + if (r) + DMWARN("reserve_metadata_snap message failed."); + + return r; +} + +static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool) +{ + int r; + + r = check_arg_count(argc, 1); + if (r) + return r; + + r = dm_pool_release_metadata_snap(pool->pmd); + if (r) + DMWARN("release_metadata_snap message failed."); + + return r; +} + /* * Messages supported: * create_thin <dev_id> @@ -2283,6 +2331,8 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po * delete <dev_id> * trim <dev_id> <new_size_in_sectors> * set_transaction_id <current_trans_id> <new_trans_id> + * reserve_metadata_snap + * release_metadata_snap */ static int pool_message(struct dm_target *ti, unsigned argc, char **argv) { @@ -2302,6 +2352,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv) else if (!strcasecmp(argv[0], "set_transaction_id")) r = process_set_transaction_id_mesg(argc, argv, pool); + else if (!strcasecmp(argv[0], "reserve_metadata_snap")) + r = process_reserve_metadata_snap_mesg(argc, argv, pool); + + else if (!strcasecmp(argv[0], "release_metadata_snap")) + r = process_release_metadata_snap_mesg(argc, argv, pool); + else DMWARN("Unrecognised thin pool target message received: %s", argv[0]); @@ -2361,7 +2417,7 @@ static int pool_status(struct dm_target *ti, status_type_t type, if (r) return r; - r = dm_pool_get_held_metadata_root(pool->pmd, &held_root); + r = dm_pool_get_metadata_snap(pool->pmd, &held_root); if (r) return r; @@ -2457,7 +2513,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 1, 0}, + .version = {1, 2, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -2575,6 +2631,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) if (tc->pool->pf.discard_enabled) { ti->discards_supported = 1; ti->num_discard_requests = 1; + ti->discard_zeroes_data_unsupported = 1; } dm_put(pool_md); @@ -2613,9 +2670,9 @@ static int thin_endio(struct dm_target *ti, union map_info *map_context) { unsigned long flags; - struct endio_hook *h = map_context->ptr; + struct dm_thin_endio_hook *h = map_context->ptr; struct list_head work; - struct new_mapping *m, *tmp; + struct dm_thin_new_mapping *m, *tmp; struct pool *pool = h->tc->pool; if (h->shared_read_entry) { @@ -2755,7 +2812,32 @@ static int __init dm_thin_init(void) r = dm_register_target(&pool_target); if (r) - dm_unregister_target(&thin_target); + goto bad_pool_target; + + r = -ENOMEM; + + _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0); 
+ if (!_cell_cache) + goto bad_cell_cache; + + _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); + if (!_new_mapping_cache) + goto bad_new_mapping_cache; + + _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0); + if (!_endio_hook_cache) + goto bad_endio_hook_cache; + + return 0; + +bad_endio_hook_cache: + kmem_cache_destroy(_new_mapping_cache); +bad_new_mapping_cache: + kmem_cache_destroy(_cell_cache); +bad_cell_cache: + dm_unregister_target(&pool_target); +bad_pool_target: + dm_unregister_target(&thin_target); return r; } @@ -2764,6 +2846,10 @@ static void dm_thin_exit(void) { dm_unregister_target(&thin_target); dm_unregister_target(&pool_target); + + kmem_cache_destroy(_cell_cache); + kmem_cache_destroy(_new_mapping_cache); + kmem_cache_destroy(_endio_hook_cache); } module_init(dm_thin_init); diff --git a/drivers/md/md.c b/drivers/md/md.c index 01233d855eb2..d5ab4493c8be 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -402,6 +402,7 @@ void mddev_resume(struct mddev *mddev) wake_up(&mddev->sb_wait); mddev->pers->quiesce(mddev, 0); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ } @@ -452,7 +453,7 @@ static void submit_flushes(struct work_struct *ws) atomic_inc(&rdev->nr_pending); atomic_inc(&rdev->nr_pending); rcu_read_unlock(); - bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev); + bi = bio_alloc_mddev(GFP_NOIO, 0, mddev); bi->bi_end_io = md_end_flush; bi->bi_private = rdev; bi->bi_bdev = rdev->bdev; @@ -607,6 +608,7 @@ void mddev_init(struct mddev *mddev) init_waitqueue_head(&mddev->sb_wait); init_waitqueue_head(&mddev->recovery_wait); mddev->reshape_position = MaxSector; + mddev->reshape_backwards = 0; mddev->resync_min = 0; mddev->resync_max = MaxSector; mddev->level = LEVEL_NONE; @@ -802,7 +804,7 @@ static int alloc_disk_sb(struct md_rdev * rdev) return 0; } -static void free_disk_sb(struct md_rdev * rdev) +void md_rdev_clear(struct md_rdev *rdev) { if (rdev->sb_page) { put_page(rdev->sb_page); @@ -815,8 +817,10 @@ static void free_disk_sb(struct md_rdev * rdev) put_page(rdev->bb_page); rdev->bb_page = NULL; } + kfree(rdev->badblocks.page); + rdev->badblocks.page = NULL; } - +EXPORT_SYMBOL_GPL(md_rdev_clear); static void super_written(struct bio *bio, int error) { @@ -887,6 +891,10 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, rdev->meta_bdev : rdev->bdev; if (metadata_op) bio->bi_sector = sector + rdev->sb_start; + else if (rdev->mddev->reshape_position != MaxSector && + (rdev->mddev->reshape_backwards == + (sector >= rdev->mddev->reshape_position))) + bio->bi_sector = sector + rdev->new_data_offset; else bio->bi_sector = sector + rdev->data_offset; bio_add_page(bio, page, size, 0); @@ -1034,12 +1042,17 @@ static unsigned int calc_sb_csum(mdp_super_t * sb) struct super_type { char *name; struct module *owner; - int (*load_super)(struct md_rdev *rdev, struct md_rdev *refdev, + int (*load_super)(struct md_rdev *rdev, + struct md_rdev *refdev, int minor_version); - int (*validate_super)(struct mddev *mddev, struct md_rdev *rdev); - void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); + int (*validate_super)(struct mddev *mddev, + struct md_rdev *rdev); + void (*sync_super)(struct mddev *mddev, + struct md_rdev *rdev); unsigned long long (*rdev_size_change)(struct md_rdev *rdev, sector_t num_sectors); + int (*allow_new_offset)(struct md_rdev *rdev, + unsigned long long new_offset); }; /* @@ -1111,6 +1124,7 @@ static int super_90_load(struct 
md_rdev *rdev, struct md_rdev *refdev, int minor rdev->preferred_minor = sb->md_minor; rdev->data_offset = 0; + rdev->new_data_offset = 0; rdev->sb_size = MD_SB_BYTES; rdev->badblocks.shift = -1; @@ -1184,7 +1198,11 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) mddev->dev_sectors = ((sector_t)sb->size) * 2; mddev->events = ev1; mddev->bitmap_info.offset = 0; + mddev->bitmap_info.space = 0; + /* bitmap can use 60 K after the 4K superblocks */ mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; + mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); + mddev->reshape_backwards = 0; if (mddev->minor_version >= 91) { mddev->reshape_position = sb->reshape_position; @@ -1192,6 +1210,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) mddev->new_level = sb->new_level; mddev->new_layout = sb->new_layout; mddev->new_chunk_sectors = sb->new_chunk >> 9; + if (mddev->delta_disks < 0) + mddev->reshape_backwards = 1; } else { mddev->reshape_position = MaxSector; mddev->delta_disks = 0; @@ -1218,9 +1238,12 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) mddev->max_disks = MD_SB_DISKS; if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && - mddev->bitmap_info.file == NULL) + mddev->bitmap_info.file == NULL) { mddev->bitmap_info.offset = mddev->bitmap_info.default_offset; + mddev->bitmap_info.space = + mddev->bitmap_info.default_space; + } } else if (mddev->pers == NULL) { /* Insist on good event counter while assembling, except @@ -1434,6 +1457,12 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) return num_sectors; } +static int +super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset) +{ + /* non-zero offset changes not possible with v0.90 */ + return new_offset == 0; +} /* * version 1 superblock */ @@ -1469,6 +1498,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ struct mdp_superblock_1 *sb; int ret; sector_t sb_start; + sector_t sectors; char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; int bmask; @@ -1523,9 +1553,18 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ bdevname(rdev->bdev,b)); return -EINVAL; } + if (sb->pad0 || + sb->pad3[0] || + memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1]))) + /* Some padding is non-zero, might be a new feature */ + return -EINVAL; rdev->preferred_minor = 0xffff; rdev->data_offset = le64_to_cpu(sb->data_offset); + rdev->new_data_offset = rdev->data_offset; + if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) && + (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) + rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset); atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; @@ -1536,6 +1575,9 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ if (minor_version && rdev->data_offset < sb_start + (rdev->sb_size/512)) return -EINVAL; + if (minor_version + && rdev->new_data_offset < sb_start + (rdev->sb_size/512)) + return -EINVAL; if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) rdev->desc_nr = -1; @@ -1607,16 +1649,14 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ else ret = 0; } - if (minor_version) - rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - - le64_to_cpu(sb->data_offset); - else - rdev->sectors = rdev->sb_start; - if (rdev->sectors < le64_to_cpu(sb->data_size)) + if (minor_version) { + sectors =
(i_size_read(rdev->bdev->bd_inode) >> 9); + sectors -= rdev->data_offset; + } else + sectors = rdev->sb_start; + if (sectors < le64_to_cpu(sb->data_size)) return -EINVAL; rdev->sectors = le64_to_cpu(sb->data_size); - if (le64_to_cpu(sb->size) > rdev->sectors) - return -EINVAL; return ret; } @@ -1644,17 +1684,37 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) mddev->dev_sectors = le64_to_cpu(sb->size); mddev->events = ev1; mddev->bitmap_info.offset = 0; + mddev->bitmap_info.space = 0; + /* Default location for bitmap is 1K after superblock + * using 3K - total of 4K + */ mddev->bitmap_info.default_offset = 1024 >> 9; - + mddev->bitmap_info.default_space = (4096-1024) >> 9; + mddev->reshape_backwards = 0; + mddev->recovery_cp = le64_to_cpu(sb->resync_offset); memcpy(mddev->uuid, sb->set_uuid, 16); mddev->max_disks = (4096-256)/2; if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && - mddev->bitmap_info.file == NULL ) + mddev->bitmap_info.file == NULL) { mddev->bitmap_info.offset = (__s32)le32_to_cpu(sb->bitmap_offset); + /* Metadata doesn't record how much space is available. + * For 1.0, we assume we can use up to the superblock + * if before, else to 4K beyond superblock. + * For others, assume no change is possible. + */ + if (mddev->minor_version > 0) + mddev->bitmap_info.space = 0; + else if (mddev->bitmap_info.offset > 0) + mddev->bitmap_info.space = + 8 - mddev->bitmap_info.offset; + else + mddev->bitmap_info.space = + -mddev->bitmap_info.offset; + } if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { mddev->reshape_position = le64_to_cpu(sb->reshape_position); @@ -1662,6 +1722,11 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) mddev->new_level = le32_to_cpu(sb->new_level); mddev->new_layout = le32_to_cpu(sb->new_layout); mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); + if (mddev->delta_disks < 0 || + (mddev->delta_disks == 0 && + (le32_to_cpu(sb->feature_map) + & MD_FEATURE_RESHAPE_BACKWARDS))) + mddev->reshape_backwards = 1; } else { mddev->reshape_position = MaxSector; mddev->delta_disks = 0; @@ -1735,7 +1800,6 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) sb->feature_map = 0; sb->pad0 = 0; sb->recovery_offset = cpu_to_le64(0); - memset(sb->pad1, 0, sizeof(sb->pad1)); memset(sb->pad3, 0, sizeof(sb->pad3)); sb->utime = cpu_to_le64((__u64)mddev->utime); @@ -1757,6 +1821,8 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) sb->devflags |= WriteMostly1; else sb->devflags &= ~WriteMostly1; + sb->data_offset = cpu_to_le64(rdev->data_offset); + sb->data_size = cpu_to_le64(rdev->sectors); if (mddev->bitmap && mddev->bitmap_info.file == NULL) { sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); @@ -1781,6 +1847,16 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) sb->delta_disks = cpu_to_le32(mddev->delta_disks); sb->new_level = cpu_to_le32(mddev->new_level); sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); + if (mddev->delta_disks == 0 && + mddev->reshape_backwards) + sb->feature_map + |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS); + if (rdev->new_data_offset != rdev->data_offset) { + sb->feature_map + |= cpu_to_le32(MD_FEATURE_NEW_OFFSET); + sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset + - rdev->data_offset)); + } } if (rdev->badblocks.count == 0) @@ -1857,6 +1933,8 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) sector_t max_sectors; if (num_sectors && num_sectors < 
rdev->mddev->dev_sectors) return 0; /* component must fit device */ + if (rdev->data_offset != rdev->new_data_offset) + return 0; /* too confusing */ if (rdev->sb_start < rdev->data_offset) { /* minor versions 1 and 2; superblock before data */ max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9; @@ -1884,6 +1962,40 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) rdev->sb_page); md_super_wait(rdev->mddev); return num_sectors; + +} + +static int +super_1_allow_new_offset(struct md_rdev *rdev, + unsigned long long new_offset) +{ + /* All necessary checks on new >= old have been done */ + struct bitmap *bitmap; + if (new_offset >= rdev->data_offset) + return 1; + + /* with 1.0 metadata, there is no metadata to tread on + * so we can always move back */ + if (rdev->mddev->minor_version == 0) + return 1; + + /* otherwise we must be sure not to step on + * any metadata, so stay: + * 36K beyond start of superblock + * beyond end of badblocks + * beyond write-intent bitmap + */ + if (rdev->sb_start + (32+4)*2 > new_offset) + return 0; + bitmap = rdev->mddev->bitmap; + if (bitmap && !rdev->mddev->bitmap_info.file && + rdev->sb_start + rdev->mddev->bitmap_info.offset + + bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset) + return 0; + if (rdev->badblocks.sector + rdev->badblocks.size > new_offset) + return 0; + + return 1; } static struct super_type super_types[] = { @@ -1894,6 +2006,7 @@ static struct super_type super_types[] = { .validate_super = super_90_validate, .sync_super = super_90_sync, .rdev_size_change = super_90_rdev_size_change, + .allow_new_offset = super_90_allow_new_offset, }, [1] = { .name = "md-1", @@ -1902,6 +2015,7 @@ static struct super_type super_types[] = { .validate_super = super_1_validate, .sync_super = super_1_sync, .rdev_size_change = super_1_rdev_size_change, + .allow_new_offset = super_1_allow_new_offset, }, }; @@ -2105,9 +2219,7 @@ static void unbind_rdev_from_array(struct md_rdev * rdev) sysfs_remove_link(&rdev->kobj, "block"); sysfs_put(rdev->sysfs_state); rdev->sysfs_state = NULL; - kfree(rdev->badblocks.page); rdev->badblocks.count = 0; - rdev->badblocks.page = NULL; /* We need to delay this, otherwise we can deadlock when * writing to 'remove' to "dev/state". We also need * to delay it due to rcu usage. 
@@ -2158,7 +2270,7 @@ static void export_rdev(struct md_rdev * rdev) bdevname(rdev->bdev,b)); if (rdev->mddev) MD_BUG(); - free_disk_sb(rdev); + md_rdev_clear(rdev); #ifndef MODULE if (test_bit(AutoDetected, &rdev->flags)) md_autodetect_dev(rdev->bdev->bd_dev); @@ -2809,9 +2921,8 @@ offset_show(struct md_rdev *rdev, char *page) static ssize_t offset_store(struct md_rdev *rdev, const char *buf, size_t len) { - char *e; - unsigned long long offset = simple_strtoull(buf, &e, 10); - if (e==buf || (*e && *e != '\n')) + unsigned long long offset; + if (strict_strtoull(buf, 10, &offset) < 0) return -EINVAL; if (rdev->mddev->pers && rdev->raid_disk >= 0) return -EBUSY; @@ -2820,12 +2931,70 @@ offset_store(struct md_rdev *rdev, const char *buf, size_t len) * can be sane */ return -EBUSY; rdev->data_offset = offset; + rdev->new_data_offset = offset; return len; } static struct rdev_sysfs_entry rdev_offset = __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); +static ssize_t new_offset_show(struct md_rdev *rdev, char *page) +{ + return sprintf(page, "%llu\n", + (unsigned long long)rdev->new_data_offset); +} + +static ssize_t new_offset_store(struct md_rdev *rdev, + const char *buf, size_t len) +{ + unsigned long long new_offset; + struct mddev *mddev = rdev->mddev; + + if (strict_strtoull(buf, 10, &new_offset) < 0) + return -EINVAL; + + if (mddev->sync_thread) + return -EBUSY; + if (new_offset == rdev->data_offset) + /* reset is always permitted */ + ; + else if (new_offset > rdev->data_offset) { + /* must not push array size beyond rdev_sectors */ + if (new_offset - rdev->data_offset + + mddev->dev_sectors > rdev->sectors) + return -E2BIG; + } + /* Metadata worries about other space details. */ + + /* decreasing the offset is inconsistent with a backwards + * reshape. + */ + if (new_offset < rdev->data_offset && + mddev->reshape_backwards) + return -EINVAL; + /* Increasing offset is inconsistent with forwards + * reshape. reshape_direction should be set to + * 'backwards' first. + */ + if (new_offset > rdev->data_offset && + !mddev->reshape_backwards) + return -EINVAL; + + if (mddev->pers && mddev->persistent && + !super_types[mddev->major_version] + .allow_new_offset(rdev, new_offset)) + return -E2BIG; + rdev->new_data_offset = new_offset; + if (new_offset > rdev->data_offset) + mddev->reshape_backwards = 1; + else if (new_offset < rdev->data_offset) + mddev->reshape_backwards = 0; + + return len; +} +static struct rdev_sysfs_entry rdev_new_offset = __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store); + static ssize_t rdev_size_show(struct md_rdev *rdev, char *page) { @@ -2870,6 +3039,8 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) if (strict_blocks_to_sectors(buf, &sectors) < 0) return -EINVAL; + if (rdev->data_offset != rdev->new_data_offset) + return -EINVAL; /* too confusing */ if (my_mddev->pers && rdev->raid_disk >= 0) { if (my_mddev->persistent) { sectors = super_types[my_mddev->major_version].
@@ -3006,6 +3177,7 @@ static struct attribute *rdev_default_attrs[] = { &rdev_errors.attr, &rdev_slot.attr, &rdev_offset.attr, + &rdev_new_offset.attr, &rdev_size.attr, &rdev_recovery_start.attr, &rdev_bad_blocks.attr, @@ -3080,6 +3252,7 @@ int md_rdev_init(struct md_rdev *rdev) rdev->raid_disk = -1; rdev->flags = 0; rdev->data_offset = 0; + rdev->new_data_offset = 0; rdev->sb_events = 0; rdev->last_read_error.tv_sec = 0; rdev->last_read_error.tv_nsec = 0; @@ -3178,8 +3351,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe abort_free: if (rdev->bdev) unlock_rdev(rdev); - free_disk_sb(rdev); - kfree(rdev->badblocks.page); + md_rdev_clear(rdev); kfree(rdev); return ERR_PTR(err); } @@ -3419,6 +3591,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len) mddev->new_chunk_sectors = mddev->chunk_sectors; mddev->raid_disks -= mddev->delta_disks; mddev->delta_disks = 0; + mddev->reshape_backwards = 0; module_put(pers->owner); printk(KERN_WARNING "md: %s: %s would not accept array\n", mdname(mddev), clevel); @@ -3492,6 +3665,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len) mddev->layout = mddev->new_layout; mddev->chunk_sectors = mddev->new_chunk_sectors; mddev->delta_disks = 0; + mddev->reshape_backwards = 0; mddev->degraded = 0; if (mddev->pers->sync_request == NULL) { /* this is now an array without redundancy, so @@ -3501,10 +3675,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len) del_timer_sync(&mddev->safemode_timer); } pers->run(mddev); - mddev_resume(mddev); set_bit(MD_CHANGE_DEVS, &mddev->flags); - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - md_wakeup_thread(mddev->thread); + mddev_resume(mddev); sysfs_notify(&mddev->kobj, NULL, "level"); md_new_event(mddev); return rv; @@ -3582,9 +3754,20 @@ raid_disks_store(struct mddev *mddev, const char *buf, size_t len) if (mddev->pers) rv = update_raid_disks(mddev, n); else if (mddev->reshape_position != MaxSector) { + struct md_rdev *rdev; int olddisks = mddev->raid_disks - mddev->delta_disks; + + rdev_for_each(rdev, mddev) { + if (olddisks < n && + rdev->data_offset < rdev->new_data_offset) + return -EINVAL; + if (olddisks > n && + rdev->data_offset > rdev->new_data_offset) + return -EINVAL; + } mddev->delta_disks = n - olddisks; mddev->raid_disks = n; + mddev->reshape_backwards = (mddev->delta_disks < 0); } else mddev->raid_disks = n; return rv ? 
rv : len; @@ -3744,8 +3927,8 @@ array_state_show(struct mddev *mddev, char *page) return sprintf(page, "%s\n", array_states[st]); } -static int do_md_stop(struct mddev * mddev, int ro, int is_open); -static int md_set_readonly(struct mddev * mddev, int is_open); +static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev); +static int md_set_readonly(struct mddev * mddev, struct block_device *bdev); static int do_md_run(struct mddev * mddev); static int restart_array(struct mddev *mddev); @@ -3761,14 +3944,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) /* stopping an active array */ if (atomic_read(&mddev->openers) > 0) return -EBUSY; - err = do_md_stop(mddev, 0, 0); + err = do_md_stop(mddev, 0, NULL); break; case inactive: /* stopping an active array */ if (mddev->pers) { if (atomic_read(&mddev->openers) > 0) return -EBUSY; - err = do_md_stop(mddev, 2, 0); + err = do_md_stop(mddev, 2, NULL); } else err = 0; /* already inactive */ break; @@ -3776,7 +3959,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) break; /* not supported yet */ case readonly: if (mddev->pers) - err = md_set_readonly(mddev, 0); + err = md_set_readonly(mddev, NULL); else { mddev->ro = 1; set_disk_ro(mddev->gendisk, 1); @@ -3786,7 +3969,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) case read_auto: if (mddev->pers) { if (mddev->ro == 0) - err = md_set_readonly(mddev, 0); + err = md_set_readonly(mddev, NULL); else if (mddev->ro == 1) err = restart_array(mddev); if (err == 0) { @@ -4266,7 +4449,8 @@ sync_completed_show(struct mddev *mddev, char *page) if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) return sprintf(page, "none\n"); - if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || + test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) max_sectors = mddev->resync_max_sectors; else max_sectors = mddev->dev_sectors; @@ -4428,6 +4612,7 @@ reshape_position_show(struct mddev *mddev, char *page) static ssize_t reshape_position_store(struct mddev *mddev, const char *buf, size_t len) { + struct md_rdev *rdev; char *e; unsigned long long new = simple_strtoull(buf, &e, 10); if (mddev->pers) @@ -4436,9 +4621,12 @@ reshape_position_store(struct mddev *mddev, const char *buf, size_t len) return -EINVAL; mddev->reshape_position = new; mddev->delta_disks = 0; + mddev->reshape_backwards = 0; mddev->new_level = mddev->level; mddev->new_layout = mddev->layout; mddev->new_chunk_sectors = mddev->chunk_sectors; + rdev_for_each(rdev, mddev) + rdev->new_data_offset = rdev->data_offset; return len; } @@ -4447,6 +4635,42 @@ __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, reshape_position_store); static ssize_t +reshape_direction_show(struct mddev *mddev, char *page) +{ + return sprintf(page, "%s\n", + mddev->reshape_backwards ? 
"backwards" : "forwards"); +} + +static ssize_t +reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) +{ + int backwards = 0; + if (cmd_match(buf, "forwards")) + backwards = 0; + else if (cmd_match(buf, "backwards")) + backwards = 1; + else + return -EINVAL; + if (mddev->reshape_backwards == backwards) + return len; + + /* check if we are allowed to change */ + if (mddev->delta_disks) + return -EBUSY; + + if (mddev->persistent && + mddev->major_version == 0) + return -EINVAL; + + mddev->reshape_backwards = backwards; + return len; +} + +static struct md_sysfs_entry md_reshape_direction = +__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show, + reshape_direction_store); + +static ssize_t array_size_show(struct mddev *mddev, char *page) { if (mddev->external_size) @@ -4501,6 +4725,7 @@ static struct attribute *md_default_attrs[] = { &md_safe_delay.attr, &md_array_state.attr, &md_reshape_position.attr, + &md_reshape_direction.attr, &md_array_size.attr, &max_corr_read_errors.attr, NULL, @@ -4914,7 +5139,8 @@ int md_run(struct mddev *mddev) err = -EINVAL; mddev->pers->stop(mddev); } - if (err == 0 && mddev->pers->sync_request) { + if (err == 0 && mddev->pers->sync_request && + (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { err = bitmap_create(mddev); if (err) { printk(KERN_ERR "%s: failed to create bitmap (%d)\n", @@ -5064,6 +5290,7 @@ static void md_clean(struct mddev *mddev) mddev->events = 0; mddev->can_decrease_events = 0; mddev->delta_disks = 0; + mddev->reshape_backwards = 0; mddev->new_level = LEVEL_NONE; mddev->new_layout = 0; mddev->new_chunk_sectors = 0; @@ -5079,6 +5306,7 @@ static void md_clean(struct mddev *mddev) mddev->merge_check_needed = 0; mddev->bitmap_info.offset = 0; mddev->bitmap_info.default_offset = 0; + mddev->bitmap_info.default_space = 0; mddev->bitmap_info.chunksize = 0; mddev->bitmap_info.daemon_sleep = 0; mddev->bitmap_info.max_write_behind = 0; @@ -5124,15 +5352,17 @@ void md_stop(struct mddev *mddev) } EXPORT_SYMBOL_GPL(md_stop); -static int md_set_readonly(struct mddev *mddev, int is_open) +static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) { int err = 0; mutex_lock(&mddev->open_mutex); - if (atomic_read(&mddev->openers) > is_open) { + if (atomic_read(&mddev->openers) > !!bdev) { printk("md: %s still in use.\n",mdname(mddev)); err = -EBUSY; goto out; } + if (bdev) + sync_blockdev(bdev); if (mddev->pers) { __md_stop_writes(mddev); @@ -5154,18 +5384,26 @@ out: * 0 - completely stop and dis-assemble array * 2 - stop but do not disassemble array */ -static int do_md_stop(struct mddev * mddev, int mode, int is_open) +static int do_md_stop(struct mddev * mddev, int mode, + struct block_device *bdev) { struct gendisk *disk = mddev->gendisk; struct md_rdev *rdev; mutex_lock(&mddev->open_mutex); - if (atomic_read(&mddev->openers) > is_open || + if (atomic_read(&mddev->openers) > !!bdev || mddev->sysfs_active) { printk("md: %s still in use.\n",mdname(mddev)); mutex_unlock(&mddev->open_mutex); return -EBUSY; } + if (bdev) + /* It is possible IO was issued on some other + * open file which was closed before we took ->open_mutex. + * As that was not the last close __blkdev_put will not + * have called sync_blockdev, so we must. 
+ */ + sync_blockdev(bdev); if (mddev->pers) { if (mddev->ro) @@ -5239,7 +5477,7 @@ static void autorun_array(struct mddev *mddev) err = do_md_run(mddev); if (err) { printk(KERN_WARNING "md: do_md_run() returned %d\n", err); - do_md_stop(mddev, 0, 0); + do_md_stop(mddev, 0, NULL); } } @@ -5421,7 +5659,7 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg) goto out; /* bitmap disabled, zero the first byte and copy out */ - if (!mddev->bitmap || !mddev->bitmap->file) { + if (!mddev->bitmap || !mddev->bitmap->storage.file) { file->pathname[0] = '\0'; goto copy_out; } @@ -5430,7 +5668,8 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg) if (!buf) goto out; - ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname)); + ptr = d_path(&mddev->bitmap->storage.file->f_path, + buf, sizeof(file->pathname)); if (IS_ERR(ptr)) goto out; @@ -5556,8 +5795,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info) super_types[mddev->major_version]. validate_super(mddev, rdev); if ((info->state & (1<<MD_DISK_SYNC)) && - (!test_bit(In_sync, &rdev->flags) || - rdev->raid_disk != info->raid_disk)) { + rdev->raid_disk != info->raid_disk) { /* This was a hot-add request, but events doesn't * match, so reject it. */ @@ -5875,6 +6113,7 @@ static int set_array_info(struct mddev * mddev, mdu_array_info_t *info) set_bit(MD_CHANGE_DEVS, &mddev->flags); mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; + mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); mddev->bitmap_info.offset = 0; mddev->reshape_position = MaxSector; @@ -5888,6 +6127,7 @@ static int set_array_info(struct mddev * mddev, mdu_array_info_t *info) mddev->new_chunk_sectors = mddev->chunk_sectors; mddev->new_layout = mddev->layout; mddev->delta_disks = 0; + mddev->reshape_backwards = 0; return 0; } @@ -5922,11 +6162,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors) */ if (mddev->sync_thread) return -EBUSY; - if (mddev->bitmap) - /* Sorry, cannot grow a bitmap yet, just remove it, - * grow, and re-add. 
- */ - return -EBUSY; + rdev_for_each(rdev, mddev) { sector_t avail = rdev->sectors; @@ -5944,6 +6180,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors) static int update_raid_disks(struct mddev *mddev, int raid_disks) { int rv; + struct md_rdev *rdev; /* change the number of raid disks */ if (mddev->pers->check_reshape == NULL) return -EINVAL; @@ -5952,11 +6189,27 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks) return -EINVAL; if (mddev->sync_thread || mddev->reshape_position != MaxSector) return -EBUSY; + + rdev_for_each(rdev, mddev) { + if (mddev->raid_disks < raid_disks && + rdev->data_offset < rdev->new_data_offset) + return -EINVAL; + if (mddev->raid_disks > raid_disks && + rdev->data_offset > rdev->new_data_offset) + return -EINVAL; + } + mddev->delta_disks = raid_disks - mddev->raid_disks; + if (mddev->delta_disks < 0) + mddev->reshape_backwards = 1; + else if (mddev->delta_disks > 0) + mddev->reshape_backwards = 0; rv = mddev->pers->check_reshape(mddev); - if (rv < 0) + if (rv < 0) { mddev->delta_disks = 0; + mddev->reshape_backwards = 0; + } return rv; } @@ -6039,6 +6292,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) return -EINVAL; mddev->bitmap_info.offset = mddev->bitmap_info.default_offset; + mddev->bitmap_info.space = + mddev->bitmap_info.default_space; mddev->pers->quiesce(mddev, 1); rv = bitmap_create(mddev); if (!rv) @@ -6050,7 +6305,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) /* remove the bitmap */ if (!mddev->bitmap) return -ENOENT; - if (mddev->bitmap->file) + if (mddev->bitmap->storage.file) return -EINVAL; mddev->pers->quiesce(mddev, 1); bitmap_destroy(mddev); @@ -6237,11 +6492,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, goto done_unlock; case STOP_ARRAY: - err = do_md_stop(mddev, 0, 1); + err = do_md_stop(mddev, 0, bdev); goto done_unlock; case STOP_ARRAY_RO: - err = md_set_readonly(mddev, 1); + err = md_set_readonly(mddev, bdev); goto done_unlock; case BLKROSET: @@ -6373,6 +6628,9 @@ static int md_open(struct block_device *bdev, fmode_t mode) struct mddev *mddev = mddev_find(bdev->bd_dev); int err; + if (!mddev) + return -ENODEV; + if (mddev->gendisk != bdev->bd_disk) { /* we are racing with mddev_put which is discarding this * bd_disk. @@ -6503,7 +6761,7 @@ struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev thread->tsk = kthread_run(md_thread, thread, "%s_%s", mdname(thread->mddev), - name ?: mddev->pers->name); + name); if (IS_ERR(thread->tsk)) { kfree(thread); return NULL; @@ -6584,7 +6842,8 @@ static void status_resync(struct seq_file *seq, struct mddev * mddev) resync = mddev->curr_resync - atomic_read(&mddev->recovery_active); - if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || + test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) max_sectors = mddev->resync_max_sectors; else max_sectors = mddev->dev_sectors; @@ -7049,6 +7308,7 @@ void md_do_sync(struct mddev *mddev) int skipped = 0; struct md_rdev *rdev; char *desc; + struct blk_plug plug; /* just incase thread restarts... 
*/ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) @@ -7147,7 +7407,7 @@ void md_do_sync(struct mddev *mddev) j = mddev->recovery_cp; } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) - max_sectors = mddev->dev_sectors; + max_sectors = mddev->resync_max_sectors; else { /* recovery follows the physical size of devices */ max_sectors = mddev->dev_sectors; @@ -7198,6 +7458,7 @@ void md_do_sync(struct mddev *mddev) } mddev->curr_resync_completed = j; + blk_start_plug(&plug); while (j < max_sectors) { sector_t sectors; @@ -7303,6 +7564,7 @@ void md_do_sync(struct mddev *mddev) * this also signals 'finished resyncing' to md_stop */ out: + blk_finish_plug(&plug); wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); /* tell personality that we are finished */ @@ -7598,7 +7860,7 @@ void md_check_recovery(struct mddev *mddev) goto unlock; if (mddev->pers->sync_request) { - if (spares && mddev->bitmap && ! mddev->bitmap->file) { + if (spares) { /* We are adding a device or devices to an array * which has the bitmap stored on all devices. * So make sure all bitmap pages get written @@ -7646,6 +7908,20 @@ void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) } EXPORT_SYMBOL(md_wait_for_blocked_rdev); +void md_finish_reshape(struct mddev *mddev) +{ + /* called be personality module when reshape completes. */ + struct md_rdev *rdev; + + rdev_for_each(rdev, mddev) { + if (rdev->data_offset > rdev->new_data_offset) + rdev->sectors += rdev->data_offset - rdev->new_data_offset; + else + rdev->sectors -= rdev->new_data_offset - rdev->data_offset; + rdev->data_offset = rdev->new_data_offset; + } +} +EXPORT_SYMBOL(md_finish_reshape); /* Bad block management. * We can record which blocks on each device are 'bad' and so just @@ -7894,10 +8170,15 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, } int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, - int acknowledged) + int is_new) { - int rv = md_set_badblocks(&rdev->badblocks, - s + rdev->data_offset, sectors, acknowledged); + int rv; + if (is_new) + s += rdev->new_data_offset; + else + s += rdev->data_offset; + rv = md_set_badblocks(&rdev->badblocks, + s, sectors, 0); if (rv) { /* Make sure they get written out promptly */ sysfs_notify_dirent_safe(rdev->sysfs_state); @@ -8003,11 +8284,15 @@ out: return rv; } -int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors) +int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, + int is_new) { + if (is_new) + s += rdev->new_data_offset; + else + s += rdev->data_offset; return md_clear_badblocks(&rdev->badblocks, - s + rdev->data_offset, - sectors); + s, sectors); } EXPORT_SYMBOL_GPL(rdev_clear_badblocks); diff --git a/drivers/md/md.h b/drivers/md/md.h index 1c2063ccf48e..7b4a3c318cae 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -55,6 +55,7 @@ struct md_rdev { int sb_loaded; __u64 sb_events; sector_t data_offset; /* start of data in array */ + sector_t new_data_offset;/* only relevant while reshaping */ sector_t sb_start; /* offset of the super block (in 512byte sectors) */ int sb_size; /* bytes in the superblock */ int preferred_minor; /* autorun support */ @@ -193,8 +194,9 @@ static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, return 0; } extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, - int acknowledged); -extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors); + int is_new); +extern int 
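/*
 * For illustration: md_finish_reshape(), added above, folds the change in
 * data_offset back into each member's usable sector count once a reshape
 * completes.  The toy_rdev below is a stand-in structure and the numbers
 * are invented; this is a worked example of the arithmetic, not kernel code.
 */
#include <stdio.h>

struct toy_rdev {
	unsigned long long data_offset;     /* where data started before    */
	unsigned long long new_data_offset; /* where data starts afterwards */
	unsigned long long sectors;         /* usable data sectors          */
};

/* Moving the data start down grows the usable area, moving it up shrinks it. */
static void toy_finish_reshape(struct toy_rdev *rdev)
{
	if (rdev->data_offset > rdev->new_data_offset)
		rdev->sectors += rdev->data_offset - rdev->new_data_offset;
	else
		rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
	rdev->data_offset = rdev->new_data_offset;
}

int main(void)
{
	/* e.g. the reshape moved the data start from sector 2048 to 1024 */
	struct toy_rdev r = { 2048, 1024, 1000000 };

	toy_finish_reshape(&r);
	printf("data_offset=%llu sectors=%llu\n", r.data_offset, r.sectors);
	return 0;
}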
rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, + int is_new); extern void md_ack_all_badblocks(struct badblocks *bb); struct mddev { @@ -262,6 +264,7 @@ struct mddev { sector_t reshape_position; int delta_disks, new_level, new_layout; int new_chunk_sectors; + int reshape_backwards; atomic_t plug_cnt; /* If device is expecting * more bios soon. @@ -390,10 +393,13 @@ struct mddev { * For external metadata, offset * from start of device. */ + unsigned long space; /* space available at this offset */ loff_t default_offset; /* this is the offset to use when * hot-adding a bitmap. It should * eventually be settable by sysfs. */ + unsigned long default_space; /* space available at + * default offset */ struct mutex mutex; unsigned long chunksize; unsigned long daemon_sleep; /* how many jiffies between updates? */ @@ -591,6 +597,7 @@ extern void md_write_start(struct mddev *mddev, struct bio *bi); extern void md_write_end(struct mddev *mddev); extern void md_done_sync(struct mddev *mddev, int blocks, int ok); extern void md_error(struct mddev *mddev, struct md_rdev *rdev); +extern void md_finish_reshape(struct mddev *mddev); extern int mddev_congested(struct mddev *mddev, int bits); extern void md_flush_request(struct mddev *mddev, struct bio *bio); @@ -615,6 +622,7 @@ extern int md_run(struct mddev *mddev); extern void md_stop(struct mddev *mddev); extern void md_stop_writes(struct mddev *mddev); extern int md_rdev_init(struct md_rdev *rdev); +extern void md_rdev_clear(struct md_rdev *rdev); extern void mddev_suspend(struct mddev *mddev); extern void mddev_resume(struct mddev *mddev); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 9339e67fcc79..61a1833ebaf3 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -474,7 +474,8 @@ static int multipath_run (struct mddev *mddev) } { - mddev->thread = md_register_thread(multipathd, mddev, NULL); + mddev->thread = md_register_thread(multipathd, mddev, + "multipath"); if (!mddev->thread) { printk(KERN_ERR "multipath: couldn't allocate thread" " for %s\n", mdname(mddev)); diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c index 50ed53bf4aa2..fc90c11620ad 100644 --- a/drivers/md/persistent-data/dm-space-map-checker.c +++ b/drivers/md/persistent-data/dm-space-map-checker.c @@ -8,6 +8,7 @@ #include <linux/device-mapper.h> #include <linux/export.h> +#include <linux/vmalloc.h> #ifdef CONFIG_DM_DEBUG_SPACE_MAPS @@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm) ca->nr = nr_blocks; ca->nr_free = nr_blocks; - ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL); - if (!ca->counts) - return -ENOMEM; + + if (!nr_blocks) + ca->counts = NULL; + else { + ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks); + if (!ca->counts) + return -ENOMEM; + } return 0; } +static void ca_destroy(struct count_array *ca) +{ + vfree(ca->counts); +} + static int ca_load(struct count_array *ca, struct dm_space_map *sm) { int r; @@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm) static int ca_extend(struct count_array *ca, dm_block_t extra_blocks) { dm_block_t nr_blocks = ca->nr + extra_blocks; - uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL); + uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks); if (!counts) return -ENOMEM; - memcpy(counts, ca->counts, sizeof(*counts) * ca->nr); - kfree(ca->counts); + if (ca->counts) { + memcpy(counts, ca->counts, 
sizeof(*counts) * ca->nr); + ca_destroy(ca); + } ca->nr = nr_blocks; ca->nr_free += extra_blocks; ca->counts = counts; @@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new) return 0; } -static void ca_destroy(struct count_array *ca) -{ - kfree(ca->counts); -} - /*----------------------------------------------------------------*/ struct sm_checker { @@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm) int r; struct sm_checker *smc; - if (!sm) - return NULL; + if (IS_ERR_OR_NULL(sm)) + return ERR_PTR(-EINVAL); smc = kmalloc(sizeof(*smc), GFP_KERNEL); if (!smc) - return NULL; + return ERR_PTR(-ENOMEM); memcpy(&smc->sm, &ops_, sizeof(smc->sm)); r = ca_create(&smc->old_counts, sm); if (r) { kfree(smc); - return NULL; + return ERR_PTR(r); } r = ca_create(&smc->counts, sm); if (r) { ca_destroy(&smc->old_counts); kfree(smc); - return NULL; + return ERR_PTR(r); } smc->real_sm = sm; @@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm) ca_destroy(&smc->counts); ca_destroy(&smc->old_counts); kfree(smc); - return NULL; + return ERR_PTR(r); } r = ca_commit(&smc->old_counts, &smc->counts); @@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm) ca_destroy(&smc->counts); ca_destroy(&smc->old_counts); kfree(smc); - return NULL; + return ERR_PTR(r); } return &smc->sm; @@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm) int r; struct sm_checker *smc; - if (!sm) - return NULL; + if (IS_ERR_OR_NULL(sm)) + return ERR_PTR(-EINVAL); smc = kmalloc(sizeof(*smc), GFP_KERNEL); if (!smc) - return NULL; + return ERR_PTR(-ENOMEM); memcpy(&smc->sm, &ops_, sizeof(smc->sm)); r = ca_create(&smc->old_counts, sm); if (r) { kfree(smc); - return NULL; + return ERR_PTR(r); } r = ca_create(&smc->counts, sm); if (r) { ca_destroy(&smc->old_counts); kfree(smc); - return NULL; + return ERR_PTR(r); } smc->real_sm = sm; diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c index fc469ba9f627..3d0ed5332883 100644 --- a/drivers/md/persistent-data/dm-space-map-disk.c +++ b/drivers/md/persistent-data/dm-space-map-disk.c @@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm, dm_block_t nr_blocks) { struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks); - return dm_sm_checker_create_fresh(sm); + struct dm_space_map *smc; + + if (IS_ERR_OR_NULL(sm)) + return sm; + + smc = dm_sm_checker_create_fresh(sm); + if (IS_ERR(smc)) + dm_sm_destroy(sm); + + return smc; } EXPORT_SYMBOL_GPL(dm_sm_disk_create); diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c index 6f8d38747d7f..e5604b32d91f 100644 --- a/drivers/md/persistent-data/dm-transaction-manager.c +++ b/drivers/md/persistent-data/dm-transaction-manager.c @@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone); void dm_tm_destroy(struct dm_transaction_manager *tm) { + if (!tm->is_clone) + wipe_shadow_table(tm); + kfree(tm); } EXPORT_SYMBOL_GPL(dm_tm_destroy); @@ -249,6 +252,7 @@ int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig, return r; } +EXPORT_SYMBOL_GPL(dm_tm_shadow_block); int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b, struct dm_block_validator *v, @@ -259,6 +263,7 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b, return dm_bm_read_lock(tm->bm, b, v, blk); } 
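/*
 * For illustration: the space-map-checker and dm_sm_disk_create() hunks
 * above switch the constructors from returning NULL on any failure to
 * returning ERR_PTR() codes, so callers can tell -ENOMEM from -EINVAL and
 * clean up the wrapped space map on error.  The helpers below are
 * hand-rolled stand-ins for the kernel's <linux/err.h> macros and
 * checker_create() is a hypothetical name; this is a userspace sketch of
 * the pattern, not the device-mapper code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095
static inline void *err_ptr(long err) { return (void *)err; }
static inline long ptr_err(const void *p) { return (long)p; }
static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct checker { int dummy; };

/* Propagate a specific errno instead of collapsing every failure to NULL. */
static struct checker *checker_create(void *real_sm)
{
	struct checker *c;

	if (!real_sm || is_err(real_sm))
		return err_ptr(-EINVAL);
	c = malloc(sizeof(*c));
	if (!c)
		return err_ptr(-ENOMEM);
	return c;
}

int main(void)
{
	struct checker *c = checker_create(NULL);

	if (is_err(c))
		printf("create failed: %ld\n", ptr_err(c));	/* -22 (EINVAL) */
	else
		free(c);
	return 0;
}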
+EXPORT_SYMBOL_GPL(dm_tm_read_lock); int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b) { @@ -342,8 +347,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm, } *sm = dm_sm_checker_create(inner); - if (!*sm) + if (IS_ERR(*sm)) { + r = PTR_ERR(*sm); goto bad2; + } } else { r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location, @@ -362,8 +369,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm, } *sm = dm_sm_checker_create(inner); - if (!*sm) + if (IS_ERR(*sm)) { + r = PTR_ERR(*sm); goto bad2; + } } return 0; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 15dd59b84e94..cacd008d6864 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect int bad_sectors; int disk = start_disk + i; - if (disk >= conf->raid_disks) - disk -= conf->raid_disks; + if (disk >= conf->raid_disks * 2) + disk -= conf->raid_disks * 2; rdev = rcu_dereference(conf->mirrors[disk].rdev); if (r1_bio->bios[disk] == IO_BLOCKED @@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio) const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); struct md_rdev *blocked_rdev; - int plugged; int first_clone; int sectors_handled; int max_sectors; @@ -1034,7 +1033,6 @@ read_again: * the bad blocks. Each set of writes gets it's own r1bio * with a set of bios attached. */ - plugged = mddev_check_plugged(mddev); disks = conf->raid_disks * 2; retry_write: @@ -1191,6 +1189,8 @@ read_again: bio_list_add(&conf->pending_bio_list, mbio); conf->pending_count++; spin_unlock_irqrestore(&conf->device_lock, flags); + if (!mddev_check_plugged(mddev)) + md_wakeup_thread(mddev->thread); } /* Mustn't call r1_bio_write_done before this next test, * as it could result in the bio being freed. 
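/*
 * For illustration: the raid1 make_request() hunks above (and the matching
 * raid10 ones later in this patch) queue each mirrored write on
 * pending_bio_list and wake the per-array thread only when no blk plug is
 * active, replacing the single unconditional wakeup at the end of the
 * function.  The toy_* names below are stand-ins that merely report what
 * would happen; this is a compressed sketch of the pattern, not the driver.
 */
#include <stdio.h>

struct toy_conf { int pending_count; };

static int toy_check_plugged(void) { return 0; /* pretend: no plug active */ }
static void toy_wakeup_thread(void) { puts("wake array thread"); }

static void queue_pending(struct toy_conf *conf)
{
	/* in the kernel this is done under conf->device_lock */
	conf->pending_count++;

	/* Only kick the daemon when nothing is plugged; a plugged task
	 * flushes the queue itself when the plug is released. */
	if (!toy_check_plugged())
		toy_wakeup_thread();
}

int main(void)
{
	struct toy_conf conf = { 0 };

	queue_pending(&conf);
	printf("pending=%d\n", conf.pending_count);
	return 0;
}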
@@ -1213,9 +1213,6 @@ read_again: /* In case raid1d snuck in to freeze_array */ wake_up(&conf->wait_barrier); - - if (do_sync || !bitmap || !plugged) - md_wakeup_thread(mddev->thread); } static void status(struct seq_file *seq, struct mddev *mddev) @@ -1821,8 +1818,14 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) if (atomic_dec_and_test(&r1_bio->remaining)) { /* if we're here, all write(s) have completed, so clean up */ - md_done_sync(mddev, r1_bio->sectors, 1); - put_buf(r1_bio); + int s = r1_bio->sectors; + if (test_bit(R1BIO_MadeGood, &r1_bio->state) || + test_bit(R1BIO_WriteError, &r1_bio->state)) + reschedule_retry(r1_bio); + else { + put_buf(r1_bio); + md_done_sync(mddev, s, 1); + } } } @@ -1859,7 +1862,9 @@ static void fix_read_error(struct r1conf *conf, int read_disk, rdev = conf->mirrors[d].rdev; if (rdev && - test_bit(In_sync, &rdev->flags) && + (test_bit(In_sync, &rdev->flags) || + (!test_bit(Faulty, &rdev->flags) && + rdev->recovery_offset >= sect + s)) && is_badblock(rdev, sect, s, &first_bad, &bad_sectors) == 0 && sync_page_io(rdev, sect, s<<9, @@ -2024,7 +2029,7 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio continue; if (test_bit(BIO_UPTODATE, &bio->bi_flags) && test_bit(R1BIO_MadeGood, &r1_bio->state)) { - rdev_clear_badblocks(rdev, r1_bio->sector, s); + rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); } if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && test_bit(R1BIO_WriteError, &r1_bio->state)) { @@ -2044,7 +2049,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) struct md_rdev *rdev = conf->mirrors[m].rdev; rdev_clear_badblocks(rdev, r1_bio->sector, - r1_bio->sectors); + r1_bio->sectors, 0); rdev_dec_pending(rdev, conf->mddev); } else if (r1_bio->bios[m] != NULL) { /* This drive got a write error. 
We need to @@ -2486,9 +2491,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp */ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { atomic_set(&r1_bio->remaining, read_targets); - for (i = 0; i < conf->raid_disks * 2; i++) { + for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) { bio = r1_bio->bios[i]; if (bio->bi_end_io == end_sync_read) { + read_targets--; md_sync_acct(bio->bi_bdev, nr_sectors); generic_make_request(bio); } @@ -2548,6 +2554,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) err = -EINVAL; spin_lock_init(&conf->device_lock); rdev_for_each(rdev, mddev) { + struct request_queue *q; int disk_idx = rdev->raid_disk; if (disk_idx >= mddev->raid_disks || disk_idx < 0) @@ -2560,6 +2567,9 @@ static struct r1conf *setup_conf(struct mddev *mddev) if (disk->rdev) goto abort; disk->rdev = rdev; + q = bdev_get_queue(rdev->bdev); + if (q->merge_bvec_fn) + mddev->merge_check_needed = 1; disk->head_position = 0; } @@ -2598,7 +2608,8 @@ static struct r1conf *setup_conf(struct mddev *mddev) if (!disk->rdev || !test_bit(In_sync, &disk->rdev->flags)) { disk->head_position = 0; - if (disk->rdev) + if (disk->rdev && + (disk->rdev->saved_raid_disk < 0)) conf->fullsync = 1; } else if (conf->last_used < 0) /* @@ -2614,7 +2625,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) goto abort; } err = -ENOMEM; - conf->thread = md_register_thread(raid1d, mddev, NULL); + conf->thread = md_register_thread(raid1d, mddev, "raid1"); if (!conf->thread) { printk(KERN_ERR "md/raid1:%s: couldn't allocate thread\n", @@ -2750,9 +2761,16 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors) * any io in the removed space completes, but it hardly seems * worth it. */ - md_set_array_sectors(mddev, raid1_size(mddev, sectors, 0)); - if (mddev->array_sectors > raid1_size(mddev, sectors, 0)) + sector_t newsize = raid1_size(mddev, sectors, 0); + if (mddev->external_size && + mddev->array_sectors > newsize) return -EINVAL; + if (mddev->bitmap) { + int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0); + if (ret) + return ret; + } + md_set_array_sectors(mddev, newsize); set_capacity(mddev->gendisk, mddev->array_sectors); revalidate_disk(mddev->gendisk); if (sectors > mddev->dev_sectors && diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 3f91c2e1dfe7..8da6282254c3 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/ratelimit.h> +#include <linux/kthread.h> #include "md.h" #include "raid10.h" #include "raid0.h" @@ -68,6 +69,11 @@ static int max_queued_requests = 1024; static void allow_barrier(struct r10conf *conf); static void lower_barrier(struct r10conf *conf); static int enough(struct r10conf *conf, int ignore); +static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, + int *skipped); +static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio); +static void end_reshape_write(struct bio *bio, int error); +static void end_reshape(struct r10conf *conf); static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) { @@ -112,7 +118,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) if (!r10_bio) return NULL; - if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) + if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || + test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) nalloc = conf->copies; /* resync */ else nalloc = 2; /* recovery */ @@ -140,9 +147,10 @@ static void * 
r10buf_pool_alloc(gfp_t gfp_flags, void *data) struct bio *rbio = r10_bio->devs[j].repl_bio; bio = r10_bio->devs[j].bio; for (i = 0; i < RESYNC_PAGES; i++) { - if (j == 1 && !test_bit(MD_RECOVERY_SYNC, - &conf->mddev->recovery)) { - /* we can share bv_page's during recovery */ + if (j > 0 && !test_bit(MD_RECOVERY_SYNC, + &conf->mddev->recovery)) { + /* we can share bv_page's during recovery + * and reshape */ struct bio *rbio = r10_bio->devs[0].bio; page = rbio->bi_io_vec[i].bv_page; get_page(page); @@ -165,10 +173,11 @@ out_free_pages: while (j--) for (i = 0; i < RESYNC_PAGES ; i++) safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); - j = -1; + j = 0; out_free_bio: - while (++j < nalloc) { - bio_put(r10_bio->devs[j].bio); + for ( ; j < nalloc; j++) { + if (r10_bio->devs[j].bio) + bio_put(r10_bio->devs[j].bio); if (r10_bio->devs[j].repl_bio) bio_put(r10_bio->devs[j].repl_bio); } @@ -504,79 +513,96 @@ static void raid10_end_write_request(struct bio *bio, int error) * sector offset to a virtual address */ -static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio) +static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio) { int n,f; sector_t sector; sector_t chunk; sector_t stripe; int dev; - int slot = 0; /* now calculate first sector/dev */ - chunk = r10bio->sector >> conf->chunk_shift; - sector = r10bio->sector & conf->chunk_mask; + chunk = r10bio->sector >> geo->chunk_shift; + sector = r10bio->sector & geo->chunk_mask; - chunk *= conf->near_copies; + chunk *= geo->near_copies; stripe = chunk; - dev = sector_div(stripe, conf->raid_disks); - if (conf->far_offset) - stripe *= conf->far_copies; + dev = sector_div(stripe, geo->raid_disks); + if (geo->far_offset) + stripe *= geo->far_copies; - sector += stripe << conf->chunk_shift; + sector += stripe << geo->chunk_shift; /* and calculate all the others */ - for (n=0; n < conf->near_copies; n++) { + for (n = 0; n < geo->near_copies; n++) { int d = dev; sector_t s = sector; r10bio->devs[slot].addr = sector; r10bio->devs[slot].devnum = d; slot++; - for (f = 1; f < conf->far_copies; f++) { - d += conf->near_copies; - if (d >= conf->raid_disks) - d -= conf->raid_disks; - s += conf->stride; + for (f = 1; f < geo->far_copies; f++) { + d += geo->near_copies; + if (d >= geo->raid_disks) + d -= geo->raid_disks; + s += geo->stride; r10bio->devs[slot].devnum = d; r10bio->devs[slot].addr = s; slot++; } dev++; - if (dev >= conf->raid_disks) { + if (dev >= geo->raid_disks) { dev = 0; - sector += (conf->chunk_mask + 1); + sector += (geo->chunk_mask + 1); } } - BUG_ON(slot != conf->copies); +} + +static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio) +{ + struct geom *geo = &conf->geo; + + if (conf->reshape_progress != MaxSector && + ((r10bio->sector >= conf->reshape_progress) != + conf->mddev->reshape_backwards)) { + set_bit(R10BIO_Previous, &r10bio->state); + geo = &conf->prev; + } else + clear_bit(R10BIO_Previous, &r10bio->state); + + __raid10_find_phys(geo, r10bio); } static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) { sector_t offset, chunk, vchunk; + /* Never use conf->prev as this is only called during resync + * or recovery, so reshape isn't happening + */ + struct geom *geo = &conf->geo; - offset = sector & conf->chunk_mask; - if (conf->far_offset) { + offset = sector & geo->chunk_mask; + if (geo->far_offset) { int fc; - chunk = sector >> conf->chunk_shift; - fc = sector_div(chunk, conf->far_copies); - dev -= fc * conf->near_copies; + chunk = sector >> 
geo->chunk_shift; + fc = sector_div(chunk, geo->far_copies); + dev -= fc * geo->near_copies; if (dev < 0) - dev += conf->raid_disks; + dev += geo->raid_disks; } else { - while (sector >= conf->stride) { - sector -= conf->stride; - if (dev < conf->near_copies) - dev += conf->raid_disks - conf->near_copies; + while (sector >= geo->stride) { + sector -= geo->stride; + if (dev < geo->near_copies) + dev += geo->raid_disks - geo->near_copies; else - dev -= conf->near_copies; + dev -= geo->near_copies; } - chunk = sector >> conf->chunk_shift; + chunk = sector >> geo->chunk_shift; } - vchunk = chunk * conf->raid_disks + dev; - sector_div(vchunk, conf->near_copies); - return (vchunk << conf->chunk_shift) + offset; + vchunk = chunk * geo->raid_disks + dev; + sector_div(vchunk, geo->near_copies); + return (vchunk << geo->chunk_shift) + offset; } /** @@ -597,10 +623,17 @@ static int raid10_mergeable_bvec(struct request_queue *q, struct r10conf *conf = mddev->private; sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); int max; - unsigned int chunk_sectors = mddev->chunk_sectors; + unsigned int chunk_sectors; unsigned int bio_sectors = bvm->bi_size >> 9; + struct geom *geo = &conf->geo; + + chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1; + if (conf->reshape_progress != MaxSector && + ((sector >= conf->reshape_progress) != + conf->mddev->reshape_backwards)) + geo = &conf->prev; - if (conf->near_copies < conf->raid_disks) { + if (geo->near_copies < geo->raid_disks) { max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; if (max < 0) @@ -614,6 +647,12 @@ static int raid10_mergeable_bvec(struct request_queue *q, if (mddev->merge_check_needed) { struct r10bio r10_bio; int s; + if (conf->reshape_progress != MaxSector) { + /* Cannot give any guidance during reshape */ + if (max <= biovec->bv_len && bio_sectors == 0) + return biovec->bv_len; + return 0; + } r10_bio.sector = sector; raid10_find_phys(conf, &r10_bio); rcu_read_lock(); @@ -681,6 +720,7 @@ static struct md_rdev *read_balance(struct r10conf *conf, struct md_rdev *rdev, *best_rdev; int do_balance; int best_slot; + struct geom *geo = &conf->geo; raid10_find_phys(conf, r10_bio); rcu_read_lock(); @@ -761,11 +801,11 @@ retry: * sequential read speed for 'far copies' arrays. So only * keep it for 'near' arrays, and review those later. 
*/ - if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) + if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending)) break; /* for far > 1 always use the lowest address */ - if (conf->far_copies > 1) + if (geo->far_copies > 1) new_distance = r10_bio->devs[slot].addr; else new_distance = abs(r10_bio->devs[slot].addr - @@ -812,7 +852,10 @@ static int raid10_congested(void *data, int bits) if (mddev_congested(mddev, bits)) return 1; rcu_read_lock(); - for (i = 0; i < conf->raid_disks && ret == 0; i++) { + for (i = 0; + (i < conf->geo.raid_disks || i < conf->prev.raid_disks) + && ret == 0; + i++) { struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); if (rdev && !test_bit(Faulty, &rdev->flags)) { struct request_queue *q = bdev_get_queue(rdev->bdev); @@ -973,21 +1016,32 @@ static void unfreeze_array(struct r10conf *conf) spin_unlock_irq(&conf->resync_lock); } +static sector_t choose_data_offset(struct r10bio *r10_bio, + struct md_rdev *rdev) +{ + if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) || + test_bit(R10BIO_Previous, &r10_bio->state)) + return rdev->data_offset; + else + return rdev->new_data_offset; +} + static void make_request(struct mddev *mddev, struct bio * bio) { struct r10conf *conf = mddev->private; struct r10bio *r10_bio; struct bio *read_bio; int i; - int chunk_sects = conf->chunk_mask + 1; + sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); + int chunk_sects = chunk_mask + 1; const int rw = bio_data_dir(bio); const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); const unsigned long do_fua = (bio->bi_rw & REQ_FUA); unsigned long flags; struct md_rdev *blocked_rdev; - int plugged; int sectors_handled; int max_sectors; + int sectors; if (unlikely(bio->bi_rw & REQ_FLUSH)) { md_flush_request(mddev, bio); @@ -997,9 +1051,10 @@ static void make_request(struct mddev *mddev, struct bio * bio) /* If this request crosses a chunk boundary, we need to * split it. This will only happen for 1 PAGE (or less) requests. */ - if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9) - > chunk_sects && - conf->near_copies < conf->raid_disks)) { + if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9) + > chunk_sects + && (conf->geo.near_copies < conf->geo.raid_disks + || conf->prev.near_copies < conf->prev.raid_disks))) { struct bio_pair *bp; /* Sanity check -- queue functions should prevent this happening */ if (bio->bi_vcnt != 1 || @@ -1051,10 +1106,41 @@ static void make_request(struct mddev *mddev, struct bio * bio) */ wait_barrier(conf); + sectors = bio->bi_size >> 9; + while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && + bio->bi_sector < conf->reshape_progress && + bio->bi_sector + sectors > conf->reshape_progress) { + /* IO spans the reshape position. Need to wait for + * reshape to pass + */ + allow_barrier(conf); + wait_event(conf->wait_barrier, + conf->reshape_progress <= bio->bi_sector || + conf->reshape_progress >= bio->bi_sector + sectors); + wait_barrier(conf); + } + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && + bio_data_dir(bio) == WRITE && + (mddev->reshape_backwards + ? 
(bio->bi_sector < conf->reshape_safe && + bio->bi_sector + sectors > conf->reshape_progress) + : (bio->bi_sector + sectors > conf->reshape_safe && + bio->bi_sector < conf->reshape_progress))) { + /* Need to update reshape_position in metadata */ + mddev->reshape_position = conf->reshape_progress; + set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_CHANGE_PENDING, &mddev->flags); + md_wakeup_thread(mddev->thread); + wait_event(mddev->sb_wait, + !test_bit(MD_CHANGE_PENDING, &mddev->flags)); + + conf->reshape_safe = mddev->reshape_position; + } + r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); r10_bio->master_bio = bio; - r10_bio->sectors = bio->bi_size >> 9; + r10_bio->sectors = sectors; r10_bio->mddev = mddev; r10_bio->sector = bio->bi_sector; @@ -1093,7 +1179,7 @@ read_again: r10_bio->devs[slot].rdev = rdev; read_bio->bi_sector = r10_bio->devs[slot].addr + - rdev->data_offset; + choose_data_offset(r10_bio, rdev); read_bio->bi_bdev = rdev->bdev; read_bio->bi_end_io = raid10_end_read_request; read_bio->bi_rw = READ | do_sync; @@ -1152,7 +1238,6 @@ read_again: * of r10_bios is recored in bio->bi_phys_segments just as with * the read case. */ - plugged = mddev_check_plugged(mddev); r10_bio->read_slot = -1; /* make sure repl_bio gets freed */ raid10_find_phys(conf, r10_bio); @@ -1297,7 +1382,8 @@ retry_write: r10_bio->devs[i].bio = mbio; mbio->bi_sector = (r10_bio->devs[i].addr+ - conf->mirrors[d].rdev->data_offset); + choose_data_offset(r10_bio, + conf->mirrors[d].rdev)); mbio->bi_bdev = conf->mirrors[d].rdev->bdev; mbio->bi_end_io = raid10_end_write_request; mbio->bi_rw = WRITE | do_sync | do_fua; @@ -1308,6 +1394,8 @@ retry_write: bio_list_add(&conf->pending_bio_list, mbio); conf->pending_count++; spin_unlock_irqrestore(&conf->device_lock, flags); + if (!mddev_check_plugged(mddev)) + md_wakeup_thread(mddev->thread); if (!r10_bio->devs[i].repl_bio) continue; @@ -1321,8 +1409,10 @@ retry_write: * so it cannot disappear, so the replacement cannot * become NULL here */ - mbio->bi_sector = (r10_bio->devs[i].addr+ - conf->mirrors[d].replacement->data_offset); + mbio->bi_sector = (r10_bio->devs[i].addr + + choose_data_offset( + r10_bio, + conf->mirrors[d].replacement)); mbio->bi_bdev = conf->mirrors[d].replacement->bdev; mbio->bi_end_io = raid10_end_write_request; mbio->bi_rw = WRITE | do_sync | do_fua; @@ -1333,6 +1423,8 @@ retry_write: bio_list_add(&conf->pending_bio_list, mbio); conf->pending_count++; spin_unlock_irqrestore(&conf->device_lock, flags); + if (!mddev_check_plugged(mddev)) + md_wakeup_thread(mddev->thread); } /* Don't remove the bias on 'remaining' (one_write_done) until @@ -1358,9 +1450,6 @@ retry_write: /* In case raid10d snuck in to freeze_array */ wake_up(&conf->wait_barrier); - - if (do_sync || !mddev->bitmap || !plugged) - md_wakeup_thread(mddev->thread); } static void status(struct seq_file *seq, struct mddev *mddev) @@ -1368,19 +1457,19 @@ static void status(struct seq_file *seq, struct mddev *mddev) struct r10conf *conf = mddev->private; int i; - if (conf->near_copies < conf->raid_disks) + if (conf->geo.near_copies < conf->geo.raid_disks) seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2); - if (conf->near_copies > 1) - seq_printf(seq, " %d near-copies", conf->near_copies); - if (conf->far_copies > 1) { - if (conf->far_offset) - seq_printf(seq, " %d offset-copies", conf->far_copies); + if (conf->geo.near_copies > 1) + seq_printf(seq, " %d near-copies", conf->geo.near_copies); + if (conf->geo.far_copies > 1) { + if (conf->geo.far_offset) + seq_printf(seq, " %d 
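/*
 * For illustration: the make_request() hunk above forces a write that lands
 * between reshape_safe and reshape_progress to first push reshape_position
 * out to the superblocks, so a crash cannot replay the reshape over data
 * that was written after the copy.  The structure and sector values below
 * are stand-ins; this is a sketch of the forward/backward test only, not
 * the raid10 code.
 */
#include <stdio.h>

typedef unsigned long long sector_t;

/* The window (reshape_safe .. reshape_progress) has been copied in memory
 * but not yet recorded as safe in the metadata. */
struct toy_conf { sector_t reshape_safe, reshape_progress; int backwards; };

/* 1 if a write covering [sector, sector+len) must wait for a metadata
 * update first (mirrors the test in make_request()). */
static int write_needs_metadata_update(const struct toy_conf *c,
					sector_t sector, sector_t len)
{
	if (c->backwards)
		return sector < c->reshape_safe &&
		       sector + len > c->reshape_progress;
	return sector + len > c->reshape_safe &&
	       sector < c->reshape_progress;
}

int main(void)
{
	struct toy_conf c = { .reshape_safe = 1024, .reshape_progress = 4096 };

	printf("%d\n", write_needs_metadata_update(&c, 2048, 8)); /* 1: inside window */
	printf("%d\n", write_needs_metadata_update(&c, 8192, 8)); /* 0: not reached  */
	return 0;
}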
offset-copies", conf->geo.far_copies); else - seq_printf(seq, " %d far-copies", conf->far_copies); + seq_printf(seq, " %d far-copies", conf->geo.far_copies); } - seq_printf(seq, " [%d/%d] [", conf->raid_disks, - conf->raid_disks - mddev->degraded); - for (i = 0; i < conf->raid_disks; i++) + seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, + conf->geo.raid_disks - mddev->degraded); + for (i = 0; i < conf->geo.raid_disks; i++) seq_printf(seq, "%s", conf->mirrors[i].rdev && test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_"); @@ -1392,7 +1481,7 @@ static void status(struct seq_file *seq, struct mddev *mddev) * Don't consider the device numbered 'ignore' * as we might be about to remove it. */ -static int enough(struct r10conf *conf, int ignore) +static int _enough(struct r10conf *conf, struct geom *geo, int ignore) { int first = 0; @@ -1403,7 +1492,7 @@ static int enough(struct r10conf *conf, int ignore) if (conf->mirrors[first].rdev && first != ignore) cnt++; - first = (first+1) % conf->raid_disks; + first = (first+1) % geo->raid_disks; } if (cnt == 0) return 0; @@ -1411,6 +1500,12 @@ static int enough(struct r10conf *conf, int ignore) return 1; } +static int enough(struct r10conf *conf, int ignore) +{ + return _enough(conf, &conf->geo, ignore) && + _enough(conf, &conf->prev, ignore); +} + static void error(struct mddev *mddev, struct md_rdev *rdev) { char b[BDEVNAME_SIZE]; @@ -1445,7 +1540,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) "md/raid10:%s: Disk failure on %s, disabling device.\n" "md/raid10:%s: Operation continuing on %d devices.\n", mdname(mddev), bdevname(rdev->bdev, b), - mdname(mddev), conf->raid_disks - mddev->degraded); + mdname(mddev), conf->geo.raid_disks - mddev->degraded); } static void print_conf(struct r10conf *conf) @@ -1458,10 +1553,10 @@ static void print_conf(struct r10conf *conf) printk(KERN_DEBUG "(!conf)\n"); return; } - printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, - conf->raid_disks); + printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, + conf->geo.raid_disks); - for (i = 0; i < conf->raid_disks; i++) { + for (i = 0; i < conf->geo.raid_disks; i++) { char b[BDEVNAME_SIZE]; tmp = conf->mirrors + i; if (tmp->rdev) @@ -1493,7 +1588,7 @@ static int raid10_spare_active(struct mddev *mddev) * Find all non-in_sync disks within the RAID10 configuration * and mark them in_sync */ - for (i = 0; i < conf->raid_disks; i++) { + for (i = 0; i < conf->geo.raid_disks; i++) { tmp = conf->mirrors + i; if (tmp->replacement && tmp->replacement->recovery_offset == MaxSector @@ -1535,7 +1630,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) int err = -EEXIST; int mirror; int first = 0; - int last = conf->raid_disks - 1; + int last = conf->geo.raid_disks - 1; struct request_queue *q = bdev_get_queue(rdev->bdev); if (mddev->recovery_cp < MaxSector) @@ -1543,7 +1638,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) * very different from resync */ return -EBUSY; - if (rdev->saved_raid_disk < 0 && !enough(conf, -1)) + if (rdev->saved_raid_disk < 0 && !_enough(conf, &conf->prev, -1)) return -EINVAL; if (rdev->raid_disk >= 0) @@ -1635,6 +1730,7 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) if (!test_bit(Faulty, &rdev->flags) && mddev->recovery_disabled != p->recovery_disabled && (!p->replacement || p->replacement == rdev) && + number < conf->geo.raid_disks && enough(conf, -1)) { err = -EBUSY; goto abort; @@ 
-1676,7 +1772,11 @@ static void end_sync_read(struct bio *bio, int error) struct r10conf *conf = r10_bio->mddev->private; int d; - d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); + if (bio == r10_bio->master_bio) { + /* this is a reshape read */ + d = r10_bio->read_slot; /* really the read dev */ + } else + d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); if (test_bit(BIO_UPTODATE, &bio->bi_flags)) set_bit(R10BIO_Uptodate, &r10_bio->state); @@ -2209,7 +2309,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 if (r10_sync_page_io(rdev, r10_bio->devs[sl].addr + sect, - s<<9, conf->tmppage, WRITE) + s, conf->tmppage, WRITE) == 0) { /* Well, this device is dead */ printk(KERN_NOTICE @@ -2218,7 +2318,9 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 " (%d sectors at %llu on %s)\n", mdname(mddev), s, (unsigned long long)( - sect + rdev->data_offset), + sect + + choose_data_offset(r10_bio, + rdev)), bdevname(rdev->bdev, b)); printk(KERN_NOTICE "md/raid10:%s: %s: failing " "drive\n", @@ -2246,7 +2348,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 switch (r10_sync_page_io(rdev, r10_bio->devs[sl].addr + sect, - s<<9, conf->tmppage, + s, conf->tmppage, READ)) { case 0: /* Well, this device is dead */ @@ -2256,7 +2358,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 " (%d sectors at %llu on %s)\n", mdname(mddev), s, (unsigned long long)( - sect + rdev->data_offset), + sect + + choose_data_offset(r10_bio, rdev)), bdevname(rdev->bdev, b)); printk(KERN_NOTICE "md/raid10:%s: %s: failing " "drive\n", @@ -2269,7 +2372,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 " (%d sectors at %llu on %s)\n", mdname(mddev), s, (unsigned long long)( - sect + rdev->data_offset), + sect + + choose_data_offset(r10_bio, rdev)), bdevname(rdev->bdev, b)); atomic_add(s, &rdev->corrected_errors); } @@ -2343,7 +2447,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i) wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); md_trim_bio(wbio, sector - bio->bi_sector, sectors); wbio->bi_sector = (r10_bio->devs[i].addr+ - rdev->data_offset+ + choose_data_offset(r10_bio, rdev) + (sector - r10_bio->sector)); wbio->bi_bdev = rdev->bdev; if (submit_bio_wait(WRITE, wbio) == 0) @@ -2407,7 +2511,7 @@ read_more: slot = r10_bio->read_slot; printk_ratelimited( KERN_ERR - "md/raid10:%s: %s: redirecting" + "md/raid10:%s: %s: redirecting " "sector %llu to another mirror\n", mdname(mddev), bdevname(rdev->bdev, b), @@ -2420,7 +2524,7 @@ read_more: r10_bio->devs[slot].bio = bio; r10_bio->devs[slot].rdev = rdev; bio->bi_sector = r10_bio->devs[slot].addr - + rdev->data_offset; + + choose_data_offset(r10_bio, rdev); bio->bi_bdev = rdev->bdev; bio->bi_rw = READ | do_sync; bio->bi_private = r10_bio; @@ -2480,7 +2584,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) rdev_clear_badblocks( rdev, r10_bio->devs[m].addr, - r10_bio->sectors); + r10_bio->sectors, 0); } else { if (!rdev_set_badblocks( rdev, @@ -2496,7 +2600,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) rdev_clear_badblocks( rdev, r10_bio->devs[m].addr, - r10_bio->sectors); + r10_bio->sectors, 0); } else { if (!rdev_set_badblocks( rdev, @@ -2515,7 +2619,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) rdev_clear_badblocks( rdev, r10_bio->devs[m].addr, - r10_bio->sectors); + r10_bio->sectors, 0); 
rdev_dec_pending(rdev, conf->mddev); } else if (bio != NULL && !test_bit(BIO_UPTODATE, &bio->bi_flags)) { @@ -2532,7 +2636,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) rdev_clear_badblocks( rdev, r10_bio->devs[m].addr, - r10_bio->sectors); + r10_bio->sectors, 0); rdev_dec_pending(rdev, conf->mddev); } } @@ -2556,7 +2660,8 @@ static void raid10d(struct mddev *mddev) blk_start_plug(&plug); for (;;) { - flush_pending_writes(conf); + if (atomic_read(&mddev->plug_cnt) == 0) + flush_pending_writes(conf); spin_lock_irqsave(&conf->device_lock, flags); if (list_empty(head)) { @@ -2573,6 +2678,8 @@ static void raid10d(struct mddev *mddev) if (test_bit(R10BIO_MadeGood, &r10_bio->state) || test_bit(R10BIO_WriteError, &r10_bio->state)) handle_write_completed(conf, r10_bio); + else if (test_bit(R10BIO_IsReshape, &r10_bio->state)) + reshape_request_write(mddev, r10_bio); else if (test_bit(R10BIO_IsSync, &r10_bio->state)) sync_request_write(mddev, r10_bio); else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) @@ -2603,7 +2710,7 @@ static int init_resync(struct r10conf *conf) buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; BUG_ON(conf->r10buf_pool); conf->have_replacement = 0; - for (i = 0; i < conf->raid_disks; i++) + for (i = 0; i < conf->geo.raid_disks; i++) if (conf->mirrors[i].replacement) conf->have_replacement = 1; conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf); @@ -2657,6 +2764,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, sector_t sync_blocks; sector_t sectors_skipped = 0; int chunks_skipped = 0; + sector_t chunk_mask = conf->geo.chunk_mask; if (!conf->r10buf_pool) if (init_resync(conf)) @@ -2664,7 +2772,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, skipped: max_sector = mddev->dev_sectors; - if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || + test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) max_sector = mddev->resync_max_sectors; if (sector_nr >= max_sector) { /* If we aborted, we need to abort the @@ -2676,11 +2785,16 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, * we need to convert that to several * virtual addresses. */ + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { + end_reshape(conf); + return 0; + } + if (mddev->curr_resync < max_sector) { /* aborted */ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) bitmap_end_sync(mddev->bitmap, mddev->curr_resync, &sync_blocks, 1); - else for (i=0; i<conf->raid_disks; i++) { + else for (i = 0; i < conf->geo.raid_disks; i++) { sector_t sect = raid10_find_virt(conf, mddev->curr_resync, i); bitmap_end_sync(mddev->bitmap, sect, @@ -2694,7 +2808,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, /* Completed a full sync so the replacements * are now fully recovered. */ - for (i = 0; i < conf->raid_disks; i++) + for (i = 0; i < conf->geo.raid_disks; i++) if (conf->mirrors[i].replacement) conf->mirrors[i].replacement ->recovery_offset @@ -2707,7 +2821,11 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, *skipped = 1; return sectors_skipped; } - if (chunks_skipped >= conf->raid_disks) { + + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) + return reshape_request(mddev, sector_nr, skipped); + + if (chunks_skipped >= conf->geo.raid_disks) { /* if there has been nothing to do on any drive, * then there is nothing to do at all.. 
*/ @@ -2721,9 +2839,9 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, /* make sure whole request will fit in a chunk - if chunks * are meaningful */ - if (conf->near_copies < conf->raid_disks && - max_sector > (sector_nr | conf->chunk_mask)) - max_sector = (sector_nr | conf->chunk_mask) + 1; + if (conf->geo.near_copies < conf->geo.raid_disks && + max_sector > (sector_nr | chunk_mask)) + max_sector = (sector_nr | chunk_mask) + 1; /* * If there is non-resync activity waiting for us then * put in a delay to throttle resync. @@ -2752,7 +2870,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int j; r10_bio = NULL; - for (i=0 ; i<conf->raid_disks; i++) { + for (i = 0 ; i < conf->geo.raid_disks; i++) { int still_degraded; struct r10bio *rb2; sector_t sect; @@ -2772,6 +2890,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, /* want to reconstruct this device */ rb2 = r10_bio; sect = raid10_find_virt(conf, sector_nr, i); + if (sect >= mddev->resync_max_sectors) { + /* last stripe is not complete - don't + * try to recover this sector. + */ + continue; + } /* Unless we are doing a full sync, or a replacement * we only need to recover the block if it is set in * the bitmap @@ -2806,7 +2930,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, /* Need to check if the array will still be * degraded */ - for (j=0; j<conf->raid_disks; j++) + for (j = 0; j < conf->geo.raid_disks; j++) if (conf->mirrors[j].rdev == NULL || test_bit(Faulty, &conf->mirrors[j].rdev->flags)) { still_degraded = 1; @@ -2984,9 +3108,9 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, r10_bio->sector = sector_nr; set_bit(R10BIO_IsSync, &r10_bio->state); raid10_find_phys(conf, r10_bio); - r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1; + r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; - for (i=0; i<conf->copies; i++) { + for (i = 0; i < conf->copies; i++) { int d = r10_bio->devs[i].devnum; sector_t first_bad, sector; int bad_sectors; @@ -3152,16 +3276,17 @@ raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) struct r10conf *conf = mddev->private; if (!raid_disks) - raid_disks = conf->raid_disks; + raid_disks = min(conf->geo.raid_disks, + conf->prev.raid_disks); if (!sectors) sectors = conf->dev_sectors; - size = sectors >> conf->chunk_shift; - sector_div(size, conf->far_copies); + size = sectors >> conf->geo.chunk_shift; + sector_div(size, conf->geo.far_copies); size = size * raid_disks; - sector_div(size, conf->near_copies); + sector_div(size, conf->geo.near_copies); - return size << conf->chunk_shift; + return size << conf->geo.chunk_shift; } static void calc_sectors(struct r10conf *conf, sector_t size) @@ -3171,10 +3296,10 @@ static void calc_sectors(struct r10conf *conf, sector_t size) * conf->stride */ - size = size >> conf->chunk_shift; - sector_div(size, conf->far_copies); - size = size * conf->raid_disks; - sector_div(size, conf->near_copies); + size = size >> conf->geo.chunk_shift; + sector_div(size, conf->geo.far_copies); + size = size * conf->geo.raid_disks; + sector_div(size, conf->geo.near_copies); /* 'size' is now the number of chunks in the array */ /* calculate "used chunks per device" */ size = size * conf->copies; @@ -3182,38 +3307,76 @@ static void calc_sectors(struct r10conf *conf, sector_t size) /* We need to round up when dividing by raid_disks to * get the stride size. 
*/ - size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks); + size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks); - conf->dev_sectors = size << conf->chunk_shift; + conf->dev_sectors = size << conf->geo.chunk_shift; - if (conf->far_offset) - conf->stride = 1 << conf->chunk_shift; + if (conf->geo.far_offset) + conf->geo.stride = 1 << conf->geo.chunk_shift; else { - sector_div(size, conf->far_copies); - conf->stride = size << conf->chunk_shift; + sector_div(size, conf->geo.far_copies); + conf->geo.stride = size << conf->geo.chunk_shift; } } +enum geo_type {geo_new, geo_old, geo_start}; +static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) +{ + int nc, fc, fo; + int layout, chunk, disks; + switch (new) { + case geo_old: + layout = mddev->layout; + chunk = mddev->chunk_sectors; + disks = mddev->raid_disks - mddev->delta_disks; + break; + case geo_new: + layout = mddev->new_layout; + chunk = mddev->new_chunk_sectors; + disks = mddev->raid_disks; + break; + default: /* avoid 'may be unused' warnings */ + case geo_start: /* new when starting reshape - raid_disks not + * updated yet. */ + layout = mddev->new_layout; + chunk = mddev->new_chunk_sectors; + disks = mddev->raid_disks + mddev->delta_disks; + break; + } + if (layout >> 17) + return -1; + if (chunk < (PAGE_SIZE >> 9) || + !is_power_of_2(chunk)) + return -2; + nc = layout & 255; + fc = (layout >> 8) & 255; + fo = layout & (1<<16); + geo->raid_disks = disks; + geo->near_copies = nc; + geo->far_copies = fc; + geo->far_offset = fo; + geo->chunk_mask = chunk - 1; + geo->chunk_shift = ffz(~chunk); + return nc*fc; +} + static struct r10conf *setup_conf(struct mddev *mddev) { struct r10conf *conf = NULL; - int nc, fc, fo; int err = -EINVAL; + struct geom geo; + int copies; + + copies = setup_geo(&geo, mddev, geo_new); - if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) || - !is_power_of_2(mddev->new_chunk_sectors)) { + if (copies == -2) { printk(KERN_ERR "md/raid10:%s: chunk size must be " "at least PAGE_SIZE(%ld) and be a power of 2.\n", mdname(mddev), PAGE_SIZE); goto out; } - nc = mddev->new_layout & 255; - fc = (mddev->new_layout >> 8) & 255; - fo = mddev->new_layout & (1<<16); - - if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks || - (mddev->new_layout >> 17)) { + if (copies < 2 || copies > mddev->raid_disks) { printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n", mdname(mddev), mddev->new_layout); goto out; @@ -3224,7 +3387,9 @@ static struct r10conf *setup_conf(struct mddev *mddev) if (!conf) goto out; - conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks, + /* FIXME calc properly */ + conf->mirrors = kzalloc(sizeof(struct mirror_info)*(mddev->raid_disks + + max(0,mddev->delta_disks)), GFP_KERNEL); if (!conf->mirrors) goto out; @@ -3233,29 +3398,36 @@ static struct r10conf *setup_conf(struct mddev *mddev) if (!conf->tmppage) goto out; - - conf->raid_disks = mddev->raid_disks; - conf->near_copies = nc; - conf->far_copies = fc; - conf->copies = nc*fc; - conf->far_offset = fo; - conf->chunk_mask = mddev->new_chunk_sectors - 1; - conf->chunk_shift = ffz(~mddev->new_chunk_sectors); - + conf->geo = geo; + conf->copies = copies; conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc, r10bio_pool_free, conf); if (!conf->r10bio_pool) goto out; calc_sectors(conf, mddev->dev_sectors); - + if (mddev->reshape_position == MaxSector) { + conf->prev = conf->geo; + conf->reshape_progress = MaxSector; + } else { + if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { + err = 
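/*
 * For illustration: setup_geo(), added above, decodes the raid10 layout
 * word (near copies in bits 0-7, far copies in bits 8-15, bit 16 selecting
 * 'offset' mode) and returns near*far as the total copy count.  The decode
 * below is a standalone transliteration; the sample layout words are the
 * ones mdadm commonly writes as n2/f2/o2.
 */
#include <stdio.h>

struct toy_geo { int near_copies, far_copies, far_offset; };

/* Returns total copies, or -1 for an unsupported layout word (bits 17+ set). */
static int toy_decode_layout(int layout, struct toy_geo *g)
{
	if (layout >> 17)
		return -1;
	g->near_copies = layout & 255;
	g->far_copies = (layout >> 8) & 255;
	g->far_offset = layout & (1 << 16);
	return g->near_copies * g->far_copies;
}

int main(void)
{
	struct toy_geo g;
	int layouts[] = { 0x102 /* n2 */, 0x201 /* f2 */, 0x10201 /* o2 */ };

	for (int i = 0; i < 3; i++) {
		int copies = toy_decode_layout(layouts[i], &g);
		printf("layout 0x%x: near=%d far=%d offset=%d copies=%d\n",
		       layouts[i], g.near_copies, g.far_copies,
		       !!g.far_offset, copies);
	}
	return 0;
}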
-EINVAL; + goto out; + } + conf->reshape_progress = mddev->reshape_position; + if (conf->prev.far_offset) + conf->prev.stride = 1 << conf->prev.chunk_shift; + else + /* far_copies must be 1 */ + conf->prev.stride = conf->dev_sectors; + } spin_lock_init(&conf->device_lock); INIT_LIST_HEAD(&conf->retry_list); spin_lock_init(&conf->resync_lock); init_waitqueue_head(&conf->wait_barrier); - conf->thread = md_register_thread(raid10d, mddev, NULL); + conf->thread = md_register_thread(raid10d, mddev, "raid10"); if (!conf->thread) goto out; @@ -3263,8 +3435,9 @@ static struct r10conf *setup_conf(struct mddev *mddev) return conf; out: - printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", - mdname(mddev)); + if (err == -ENOMEM) + printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", + mdname(mddev)); if (conf) { if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); @@ -3282,12 +3455,8 @@ static int run(struct mddev *mddev) struct mirror_info *disk; struct md_rdev *rdev; sector_t size; - - /* - * copy the already verified devices into our private RAID10 - * bookkeeping area. [whatever we allocate in run(), - * should be freed in stop()] - */ + sector_t min_offset_diff = 0; + int first = 1; if (mddev->private == NULL) { conf = setup_conf(mddev); @@ -3304,17 +3473,21 @@ static int run(struct mddev *mddev) chunk_size = mddev->chunk_sectors << 9; blk_queue_io_min(mddev->queue, chunk_size); - if (conf->raid_disks % conf->near_copies) - blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks); + if (conf->geo.raid_disks % conf->geo.near_copies) + blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); else blk_queue_io_opt(mddev->queue, chunk_size * - (conf->raid_disks / conf->near_copies)); + (conf->geo.raid_disks / conf->geo.near_copies)); rdev_for_each(rdev, mddev) { + long long diff; + struct request_queue *q; disk_idx = rdev->raid_disk; - if (disk_idx >= conf->raid_disks - || disk_idx < 0) + if (disk_idx < 0) + continue; + if (disk_idx >= conf->geo.raid_disks && + disk_idx >= conf->prev.raid_disks) continue; disk = conf->mirrors + disk_idx; @@ -3327,12 +3500,23 @@ static int run(struct mddev *mddev) goto out_free_conf; disk->rdev = rdev; } + q = bdev_get_queue(rdev->bdev); + if (q->merge_bvec_fn) + mddev->merge_check_needed = 1; + diff = (rdev->new_data_offset - rdev->data_offset); + if (!mddev->reshape_backwards) + diff = -diff; + if (diff < 0) + diff = 0; + if (first || diff < min_offset_diff) + min_offset_diff = diff; disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); disk->head_position = 0; } + /* need to check that every block has at least one working mirror */ if (!enough(conf, -1)) { printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n", @@ -3340,8 +3524,21 @@ static int run(struct mddev *mddev) goto out_free_conf; } + if (conf->reshape_progress != MaxSector) { + /* must ensure that shape change is supported */ + if (conf->geo.far_copies != 1 && + conf->geo.far_offset == 0) + goto out_free_conf; + if (conf->prev.far_copies != 1 && + conf->geo.far_offset == 0) + goto out_free_conf; + } + mddev->degraded = 0; - for (i = 0; i < conf->raid_disks; i++) { + for (i = 0; + i < conf->geo.raid_disks + || i < conf->prev.raid_disks; + i++) { disk = conf->mirrors + i; @@ -3368,8 +3565,8 @@ static int run(struct mddev *mddev) mdname(mddev)); printk(KERN_INFO "md/raid10:%s: active with %d out of %d devices\n", - mdname(mddev), conf->raid_disks - mddev->degraded, - conf->raid_disks); + mdname(mddev), conf->geo.raid_disks - mddev->degraded, + 
conf->geo.raid_disks); /* * Ok, everything is just fine now */ @@ -3386,11 +3583,11 @@ static int run(struct mddev *mddev) * maybe... */ { - int stripe = conf->raid_disks * + int stripe = conf->geo.raid_disks * ((mddev->chunk_sectors << 9) / PAGE_SIZE); - stripe /= conf->near_copies; - if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) - mddev->queue->backing_dev_info.ra_pages = 2* stripe; + stripe /= conf->geo.near_copies; + if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) + mddev->queue->backing_dev_info.ra_pages = 2 * stripe; } blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); @@ -3398,6 +3595,30 @@ static int run(struct mddev *mddev) if (md_integrity_register(mddev)) goto out_free_conf; + if (conf->reshape_progress != MaxSector) { + unsigned long before_length, after_length; + + before_length = ((1 << conf->prev.chunk_shift) * + conf->prev.far_copies); + after_length = ((1 << conf->geo.chunk_shift) * + conf->geo.far_copies); + + if (max(before_length, after_length) > min_offset_diff) { + /* This cannot work */ + printk("md/raid10: offset difference not enough to continue reshape\n"); + goto out_free_conf; + } + conf->offset_diff = min_offset_diff; + + conf->reshape_safe = conf->reshape_progress; + clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); + clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); + set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + mddev->sync_thread = md_register_thread(md_do_sync, mddev, + "reshape"); + } + return 0; out_free_conf: @@ -3460,14 +3681,23 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors) struct r10conf *conf = mddev->private; sector_t oldsize, size; - if (conf->far_copies > 1 && !conf->far_offset) + if (mddev->reshape_position != MaxSector) + return -EBUSY; + + if (conf->geo.far_copies > 1 && !conf->geo.far_offset) return -EINVAL; oldsize = raid10_size(mddev, 0, 0); size = raid10_size(mddev, sectors, 0); - md_set_array_sectors(mddev, size); - if (mddev->array_sectors > size) + if (mddev->external_size && + mddev->array_sectors > size) return -EINVAL; + if (mddev->bitmap) { + int ret = bitmap_resize(mddev->bitmap, size, 0, 0); + if (ret) + return ret; + } + md_set_array_sectors(mddev, size); set_capacity(mddev->gendisk, mddev->array_sectors); revalidate_disk(mddev->gendisk); if (sectors > mddev->dev_sectors && @@ -3534,6 +3764,758 @@ static void *raid10_takeover(struct mddev *mddev) return ERR_PTR(-EINVAL); } +static int raid10_check_reshape(struct mddev *mddev) +{ + /* Called when there is a request to change + * - layout (to ->new_layout) + * - chunk size (to ->new_chunk_sectors) + * - raid_disks (by delta_disks) + * or when trying to restart a reshape that was ongoing. + * + * We need to validate the request and possibly allocate + * space if that might be an issue later. + * + * Currently we reject any reshape of a 'far' mode array, + * allow chunk size to change if new is generally acceptable, + * allow raid_disks to increase, and allow + * a switch between 'near' mode and 'offset' mode. 
+ */ + struct r10conf *conf = mddev->private; + struct geom geo; + + if (conf->geo.far_copies != 1 && !conf->geo.far_offset) + return -EINVAL; + + if (setup_geo(&geo, mddev, geo_start) != conf->copies) + /* mustn't change number of copies */ + return -EINVAL; + if (geo.far_copies > 1 && !geo.far_offset) + /* Cannot switch to 'far' mode */ + return -EINVAL; + + if (mddev->array_sectors & geo.chunk_mask) + /* not factor of array size */ + return -EINVAL; + + if (!enough(conf, -1)) + return -EINVAL; + + kfree(conf->mirrors_new); + conf->mirrors_new = NULL; + if (mddev->delta_disks > 0) { + /* allocate new 'mirrors' list */ + conf->mirrors_new = kzalloc( + sizeof(struct mirror_info) + *(mddev->raid_disks + + mddev->delta_disks), + GFP_KERNEL); + if (!conf->mirrors_new) + return -ENOMEM; + } + return 0; +} + +/* + * Need to check if array has failed when deciding whether to: + * - start an array + * - remove non-faulty devices + * - add a spare + * - allow a reshape + * This determination is simple when no reshape is happening. + * However if there is a reshape, we need to carefully check + * both the before and after sections. + * This is because some failed devices may only affect one + * of the two sections, and some non-in_sync devices may + * be insync in the section most affected by failed devices. + */ +static int calc_degraded(struct r10conf *conf) +{ + int degraded, degraded2; + int i; + + rcu_read_lock(); + degraded = 0; + /* 'prev' section first */ + for (i = 0; i < conf->prev.raid_disks; i++) { + struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); + if (!rdev || test_bit(Faulty, &rdev->flags)) + degraded++; + else if (!test_bit(In_sync, &rdev->flags)) + /* When we can reduce the number of devices in + * an array, this might not contribute to + * 'degraded'. It does now. + */ + degraded++; + } + rcu_read_unlock(); + if (conf->geo.raid_disks == conf->prev.raid_disks) + return degraded; + rcu_read_lock(); + degraded2 = 0; + for (i = 0; i < conf->geo.raid_disks; i++) { + struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); + if (!rdev || test_bit(Faulty, &rdev->flags)) + degraded2++; + else if (!test_bit(In_sync, &rdev->flags)) { + /* If reshape is increasing the number of devices, + * this section has already been recovered, so + * it doesn't contribute to degraded. + * else it does. + */ + if (conf->geo.raid_disks <= conf->prev.raid_disks) + degraded2++; + } + } + rcu_read_unlock(); + if (degraded2 > degraded) + return degraded2; + return degraded; +} + +static int raid10_start_reshape(struct mddev *mddev) +{ + /* A 'reshape' has been requested. This commits + * the various 'new' fields and sets MD_RECOVER_RESHAPE + * This also checks if there are enough spares and adds them + * to the array. + * We currently require enough spares to make the final + * array non-degraded. We also require that the difference + * between old and new data_offset - on each device - is + * enough that we never risk over-writing. 
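/*
 * For illustration: calc_degraded(), added above, counts unusable members
 * against the pre-reshape disk count and again against the post-reshape
 * count, reporting the worse of the two; only the growing 'new' section may
 * treat not-yet-synced members as healthy.  The toy_disk layout and flags
 * below are stand-ins; this is a reduced model of that rule, not the driver.
 */
#include <stdio.h>

struct toy_disk { int present, faulty, in_sync; };

/* One pass of the loop: members of the first 'raid_disks' slots that
 * cannot currently serve data. */
static int count_degraded(const struct toy_disk *d, int raid_disks,
			  int growing_section)
{
	int degraded = 0;

	for (int i = 0; i < raid_disks; i++) {
		if (!d[i].present || d[i].faulty)
			degraded++;
		else if (!d[i].in_sync && !growing_section)
			/* a shrinking or unchanged section still needs
			 * every member fully recovered */
			degraded++;
	}
	return degraded;
}

int main(void)
{
	/* 2-disk array growing to 4: the two added slots are still syncing */
	struct toy_disk d[4] = {
		{ 1, 0, 1 }, { 1, 0, 1 }, { 1, 0, 0 }, { 1, 0, 0 },
	};
	int prev = count_degraded(d, 2, 0);	/* old geometry */
	int new  = count_degraded(d, 4, 1);	/* new, growing geometry */

	printf("degraded = %d\n", prev > new ? prev : new);
	return 0;
}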
+ */ + + unsigned long before_length, after_length; + sector_t min_offset_diff = 0; + int first = 1; + struct geom new; + struct r10conf *conf = mddev->private; + struct md_rdev *rdev; + int spares = 0; + int ret; + + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + return -EBUSY; + + if (setup_geo(&new, mddev, geo_start) != conf->copies) + return -EINVAL; + + before_length = ((1 << conf->prev.chunk_shift) * + conf->prev.far_copies); + after_length = ((1 << conf->geo.chunk_shift) * + conf->geo.far_copies); + + rdev_for_each(rdev, mddev) { + if (!test_bit(In_sync, &rdev->flags) + && !test_bit(Faulty, &rdev->flags)) + spares++; + if (rdev->raid_disk >= 0) { + long long diff = (rdev->new_data_offset + - rdev->data_offset); + if (!mddev->reshape_backwards) + diff = -diff; + if (diff < 0) + diff = 0; + if (first || diff < min_offset_diff) + min_offset_diff = diff; + } + } + + if (max(before_length, after_length) > min_offset_diff) + return -EINVAL; + + if (spares < mddev->delta_disks) + return -EINVAL; + + conf->offset_diff = min_offset_diff; + spin_lock_irq(&conf->device_lock); + if (conf->mirrors_new) { + memcpy(conf->mirrors_new, conf->mirrors, + sizeof(struct mirror_info)*conf->prev.raid_disks); + smp_mb(); + kfree(conf->mirrors_old); /* FIXME and elsewhere */ + conf->mirrors_old = conf->mirrors; + conf->mirrors = conf->mirrors_new; + conf->mirrors_new = NULL; + } + setup_geo(&conf->geo, mddev, geo_start); + smp_mb(); + if (mddev->reshape_backwards) { + sector_t size = raid10_size(mddev, 0, 0); + if (size < mddev->array_sectors) { + spin_unlock_irq(&conf->device_lock); + printk(KERN_ERR "md/raid10:%s: array size must be reduced before number of disks\n", + mdname(mddev)); + return -EINVAL; + } + mddev->resync_max_sectors = size; + conf->reshape_progress = size; + } else + conf->reshape_progress = 0; + spin_unlock_irq(&conf->device_lock); + + if (mddev->delta_disks && mddev->bitmap) { + ret = bitmap_resize(mddev->bitmap, + raid10_size(mddev, 0, + conf->geo.raid_disks), + 0, 0); + if (ret) + goto abort; + } + if (mddev->delta_disks > 0) { + rdev_for_each(rdev, mddev) + if (rdev->raid_disk < 0 && + !test_bit(Faulty, &rdev->flags)) { + if (raid10_add_disk(mddev, rdev) == 0) { + if (rdev->raid_disk >= + conf->prev.raid_disks) + set_bit(In_sync, &rdev->flags); + else + rdev->recovery_offset = 0; + + if (sysfs_link_rdev(mddev, rdev)) + /* Failure here is OK */; + } + } else if (rdev->raid_disk >= conf->prev.raid_disks + && !test_bit(Faulty, &rdev->flags)) { + /* This is a spare that was manually added */ + set_bit(In_sync, &rdev->flags); + } + } + /* When a reshape changes the number of devices, + * ->degraded is measured against the larger of the + * pre and post numbers.
+ */ + spin_lock_irq(&conf->device_lock); + mddev->degraded = calc_degraded(conf); + spin_unlock_irq(&conf->device_lock); + mddev->raid_disks = conf->geo.raid_disks; + mddev->reshape_position = conf->reshape_progress; + set_bit(MD_CHANGE_DEVS, &mddev->flags); + + clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); + clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); + set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + + mddev->sync_thread = md_register_thread(md_do_sync, mddev, + "reshape"); + if (!mddev->sync_thread) { + ret = -EAGAIN; + goto abort; + } + conf->reshape_checkpoint = jiffies; + md_wakeup_thread(mddev->sync_thread); + md_new_event(mddev); + return 0; + +abort: + mddev->recovery = 0; + spin_lock_irq(&conf->device_lock); + conf->geo = conf->prev; + mddev->raid_disks = conf->geo.raid_disks; + rdev_for_each(rdev, mddev) + rdev->new_data_offset = rdev->data_offset; + smp_wmb(); + conf->reshape_progress = MaxSector; + mddev->reshape_position = MaxSector; + spin_unlock_irq(&conf->device_lock); + return ret; +} + +/* Calculate the last device-address that could contain + * any block from the chunk that includes the array-address 's' + * and report the next address. + * i.e. the address returned will be chunk-aligned and after + * any data that is in the chunk containing 's'. + */ +static sector_t last_dev_address(sector_t s, struct geom *geo) +{ + s = (s | geo->chunk_mask) + 1; + s >>= geo->chunk_shift; + s *= geo->near_copies; + s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks); + s *= geo->far_copies; + s <<= geo->chunk_shift; + return s; +} + +/* Calculate the first device-address that could contain + * any block from the chunk that includes the array-address 's'. + * This too will be the start of a chunk + */ +static sector_t first_dev_address(sector_t s, struct geom *geo) +{ + s >>= geo->chunk_shift; + s *= geo->near_copies; + sector_div(s, geo->raid_disks); + s *= geo->far_copies; + s <<= geo->chunk_shift; + return s; +} + +static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, + int *skipped) +{ + /* We simply copy at most one chunk (smallest of old and new) + * at a time, possibly less if that exceeds RESYNC_PAGES, + * or we hit a bad block or something. + * This might mean we pause for normal IO in the middle of + * a chunk, but that is not a problem as mddev->reshape_position + * can record any location. + * + * If we will want to write to a location that isn't + * yet recorded as 'safe' (i.e. in metadata on disk) then + * we need to flush all reshape requests and update the metadata. + * + * When reshaping forwards (e.g. to more devices), we interpret + * 'safe' as the earliest block which might not have been copied + * down yet. We divide this by previous stripe size and multiply + * by previous stripe length to get lowest device offset that we + * cannot write to yet. + * We interpret 'sector_nr' as an address that we want to write to. + * From this we use last_dev_address() to find where we might + * write to, and first_dev_address() on the 'safe' position. + * If this 'next' write position is after the 'safe' position, + * we must update the metadata to increase the 'safe' position. + * + * When reshaping backwards, we round in the opposite direction + * and perform the reverse test: next write position must not be + * less than current safe position.
+ * + * In all this the minimum difference in data offsets + * (conf->offset_diff - always positive) allows a bit of slack, + * so next can be after 'safe', but not by more than offset_diff. + * + * We need to prepare all the bios here before we start any IO + * to ensure the size we choose is acceptable to all devices. + * That means one for each copy for write-out and an extra one for + * read-in. + * We store the read-in bio in ->master_bio and the others in + * ->devs[x].bio and ->devs[x].repl_bio. + */ + struct r10conf *conf = mddev->private; + struct r10bio *r10_bio; + sector_t next, safe, last; + int max_sectors; + int nr_sectors; + int s; + struct md_rdev *rdev; + int need_flush = 0; + struct bio *blist; + struct bio *bio, *read_bio; + int sectors_done = 0; + + if (sector_nr == 0) { + /* If restarting in the middle, skip the initial sectors */ + if (mddev->reshape_backwards && + conf->reshape_progress < raid10_size(mddev, 0, 0)) { + sector_nr = (raid10_size(mddev, 0, 0) + - conf->reshape_progress); + } else if (!mddev->reshape_backwards && + conf->reshape_progress > 0) + sector_nr = conf->reshape_progress; + if (sector_nr) { + mddev->curr_resync_completed = sector_nr; + sysfs_notify(&mddev->kobj, NULL, "sync_completed"); + *skipped = 1; + return sector_nr; + } + } + + /* We don't use sector_nr to track where we are up to + * as that doesn't work well for ->reshape_backwards. + * So just use ->reshape_progress. + */ + if (mddev->reshape_backwards) { + /* 'next' is the earliest device address that we might + * write to for this chunk in the new layout + */ + next = first_dev_address(conf->reshape_progress - 1, + &conf->geo); + + /* 'safe' is the last device address that we might read from + * in the old layout after a restart + */ + safe = last_dev_address(conf->reshape_safe - 1, + &conf->prev); + + if (next + conf->offset_diff < safe) + need_flush = 1; + + last = conf->reshape_progress - 1; + sector_nr = last & ~(sector_t)(conf->geo.chunk_mask + & conf->prev.chunk_mask); + if (sector_nr + RESYNC_BLOCK_SIZE/512 < last) + sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512; + } else { + /* 'next' is after the last device address that we + * might write to for this chunk in the new layout + */ + next = last_dev_address(conf->reshape_progress, &conf->geo); + + /* 'safe' is the earliest device address that we might + * read from in the old layout after a restart + */ + safe = first_dev_address(conf->reshape_safe, &conf->prev); + + /* Need to update metadata if 'next' might be beyond 'safe' + * as that would possibly corrupt data + */ + if (next > safe + conf->offset_diff) + need_flush = 1; + + sector_nr = conf->reshape_progress; + last = sector_nr | (conf->geo.chunk_mask + & conf->prev.chunk_mask); + + if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last) + last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1; + } + + if (need_flush || + time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { + /* Need to update reshape_position in metadata */ + wait_barrier(conf); + mddev->reshape_position = conf->reshape_progress; + if (mddev->reshape_backwards) + mddev->curr_resync_completed = raid10_size(mddev, 0, 0) + - conf->reshape_progress; + else + mddev->curr_resync_completed = conf->reshape_progress; + conf->reshape_checkpoint = jiffies; + set_bit(MD_CHANGE_DEVS, &mddev->flags); + md_wakeup_thread(mddev->thread); + wait_event(mddev->sb_wait, mddev->flags == 0 || + kthread_should_stop()); + conf->reshape_safe = mddev->reshape_position; + allow_barrier(conf); + } + +read_more: + /* Now schedule reads for blocks from
sector_nr to last */ + r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); + raise_barrier(conf, sectors_done != 0); + atomic_set(&r10_bio->remaining, 0); + r10_bio->mddev = mddev; + r10_bio->sector = sector_nr; + set_bit(R10BIO_IsReshape, &r10_bio->state); + r10_bio->sectors = last - sector_nr + 1; + rdev = read_balance(conf, r10_bio, &max_sectors); + BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state)); + + if (!rdev) { + /* Cannot read from here, so need to record bad blocks + * on all the target devices. + */ + // FIXME + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + return sectors_done; + } + + read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); + + read_bio->bi_bdev = rdev->bdev; + read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr + + rdev->data_offset); + read_bio->bi_private = r10_bio; + read_bio->bi_end_io = end_sync_read; + read_bio->bi_rw = READ; + read_bio->bi_flags &= ~(BIO_POOL_MASK - 1); + read_bio->bi_flags |= 1 << BIO_UPTODATE; + read_bio->bi_vcnt = 0; + read_bio->bi_idx = 0; + read_bio->bi_size = 0; + r10_bio->master_bio = read_bio; + r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; + + /* Now find the locations in the new layout */ + __raid10_find_phys(&conf->geo, r10_bio); + + blist = read_bio; + read_bio->bi_next = NULL; + + for (s = 0; s < conf->copies*2; s++) { + struct bio *b; + int d = r10_bio->devs[s/2].devnum; + struct md_rdev *rdev2; + if (s&1) { + rdev2 = conf->mirrors[d].replacement; + b = r10_bio->devs[s/2].repl_bio; + } else { + rdev2 = conf->mirrors[d].rdev; + b = r10_bio->devs[s/2].bio; + } + if (!rdev2 || test_bit(Faulty, &rdev2->flags)) + continue; + b->bi_bdev = rdev2->bdev; + b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset; + b->bi_private = r10_bio; + b->bi_end_io = end_reshape_write; + b->bi_rw = WRITE; + b->bi_flags &= ~(BIO_POOL_MASK - 1); + b->bi_flags |= 1 << BIO_UPTODATE; + b->bi_next = blist; + b->bi_vcnt = 0; + b->bi_idx = 0; + b->bi_size = 0; + blist = b; + } + + /* Now add as many pages as possible to all of these bios. */ + + nr_sectors = 0; + for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) { + struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page; + int len = (max_sectors - s) << 9; + if (len > PAGE_SIZE) + len = PAGE_SIZE; + for (bio = blist; bio ; bio = bio->bi_next) { + struct bio *bio2; + if (bio_add_page(bio, page, len, 0)) + continue; + + /* Didn't fit, must stop */ + for (bio2 = blist; + bio2 && bio2 != bio; + bio2 = bio2->bi_next) { + /* Remove last page from this bio */ + bio2->bi_vcnt--; + bio2->bi_size -= len; + bio2->bi_flags &= ~(1<<BIO_SEG_VALID); + } + goto bio_full; + } + sector_nr += len >> 9; + nr_sectors += len >> 9; + } +bio_full: + r10_bio->sectors = nr_sectors; + + /* Now submit the read */ + md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); + atomic_inc(&r10_bio->remaining); + read_bio->bi_next = NULL; + generic_make_request(read_bio); + sector_nr += nr_sectors; + sectors_done += nr_sectors; + if (sector_nr <= last) + goto read_more; + + /* Now that we have done the whole section we can + * update reshape_progress + */ + if (mddev->reshape_backwards) + conf->reshape_progress -= sectors_done; + else + conf->reshape_progress += sectors_done; + + return sectors_done; +} + +static void end_reshape_request(struct r10bio *r10_bio); +static int handle_reshape_read_error(struct mddev *mddev, + struct r10bio *r10_bio); +static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) +{ + /* Reshape read completed. 
Hopefully we have a block + * to write out. + * If we got a read error then we do sync 1-page reads from + * elsewhere until we find the data - or give up. + */ + struct r10conf *conf = mddev->private; + int s; + + if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) + if (handle_reshape_read_error(mddev, r10_bio) < 0) { + /* Reshape has been aborted */ + md_done_sync(mddev, r10_bio->sectors, 0); + return; + } + + /* We definitely have the data in the pages, schedule the + * writes. + */ + atomic_set(&r10_bio->remaining, 1); + for (s = 0; s < conf->copies*2; s++) { + struct bio *b; + int d = r10_bio->devs[s/2].devnum; + struct md_rdev *rdev; + if (s&1) { + rdev = conf->mirrors[d].replacement; + b = r10_bio->devs[s/2].repl_bio; + } else { + rdev = conf->mirrors[d].rdev; + b = r10_bio->devs[s/2].bio; + } + if (!rdev || test_bit(Faulty, &rdev->flags)) + continue; + atomic_inc(&rdev->nr_pending); + md_sync_acct(b->bi_bdev, r10_bio->sectors); + atomic_inc(&r10_bio->remaining); + b->bi_next = NULL; + generic_make_request(b); + } + end_reshape_request(r10_bio); +} + +static void end_reshape(struct r10conf *conf) +{ + if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) + return; + + spin_lock_irq(&conf->device_lock); + conf->prev = conf->geo; + md_finish_reshape(conf->mddev); + smp_wmb(); + conf->reshape_progress = MaxSector; + spin_unlock_irq(&conf->device_lock); + + /* read-ahead size must cover two whole stripes, which is + * 2 * (datadisks) * chunksize where 'n' is the number of raid devices + */ + if (conf->mddev->queue) { + int stripe = conf->geo.raid_disks * + ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); + stripe /= conf->geo.near_copies; + if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) + conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + } + conf->fullsync = 0; +} + + +static int handle_reshape_read_error(struct mddev *mddev, + struct r10bio *r10_bio) +{ + /* Use sync reads to get the blocks from somewhere else */ + int sectors = r10_bio->sectors; + struct r10bio r10b; + struct r10conf *conf = mddev->private; + int slot = 0; + int idx = 0; + struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec; + + r10b.sector = r10_bio->sector; + __raid10_find_phys(&conf->prev, &r10b); + + while (sectors) { + int s = sectors; + int success = 0; + int first_slot = slot; + + if (s > (PAGE_SIZE >> 9)) + s = PAGE_SIZE >> 9; + + while (!success) { + int d = r10b.devs[slot].devnum; + struct md_rdev *rdev = conf->mirrors[d].rdev; + sector_t addr; + if (rdev == NULL || + test_bit(Faulty, &rdev->flags) || + !test_bit(In_sync, &rdev->flags)) + goto failed; + + addr = r10b.devs[slot].addr + idx * PAGE_SIZE; + success = sync_page_io(rdev, + addr, + s << 9, + bvec[idx].bv_page, + READ, false); + if (success) + break; + failed: + slot++; + if (slot >= conf->copies) + slot = 0; + if (slot == first_slot) + break; + } + if (!success) { + /* couldn't read this block, must give up */ + set_bit(MD_RECOVERY_INTR, + &mddev->recovery); + return -EIO; + } + sectors -= s; + idx++; + } + return 0; +} + +static void end_reshape_write(struct bio *bio, int error) +{ + int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + struct r10bio *r10_bio = bio->bi_private; + struct mddev *mddev = r10_bio->mddev; + struct r10conf *conf = mddev->private; + int d; + int slot; + int repl; + struct md_rdev *rdev = NULL; + + d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); + if (repl) + rdev = conf->mirrors[d].replacement; + if (!rdev) { + smp_mb(); + rdev = conf->mirrors[d].rdev; + } + + if (!uptodate) { + /* 
FIXME should record badblock */ + md_error(mddev, rdev); + } + + rdev_dec_pending(rdev, mddev); + end_reshape_request(r10_bio); +} + +static void end_reshape_request(struct r10bio *r10_bio) +{ + if (!atomic_dec_and_test(&r10_bio->remaining)) + return; + md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); + bio_put(r10_bio->master_bio); + put_buf(r10_bio); +} + +static void raid10_finish_reshape(struct mddev *mddev) +{ + struct r10conf *conf = mddev->private; + + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + return; + + if (mddev->delta_disks > 0) { + sector_t size = raid10_size(mddev, 0, 0); + md_set_array_sectors(mddev, size); + if (mddev->recovery_cp > mddev->resync_max_sectors) { + mddev->recovery_cp = mddev->resync_max_sectors; + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + } + mddev->resync_max_sectors = size; + set_capacity(mddev->gendisk, mddev->array_sectors); + revalidate_disk(mddev->gendisk); + } else { + int d; + for (d = conf->geo.raid_disks ; + d < conf->geo.raid_disks - mddev->delta_disks; + d++) { + struct md_rdev *rdev = conf->mirrors[d].rdev; + if (rdev) + clear_bit(In_sync, &rdev->flags); + rdev = conf->mirrors[d].replacement; + if (rdev) + clear_bit(In_sync, &rdev->flags); + } + } + mddev->layout = mddev->new_layout; + mddev->chunk_sectors = 1 << conf->geo.chunk_shift; + mddev->reshape_position = MaxSector; + mddev->delta_disks = 0; + mddev->reshape_backwards = 0; +} + static struct md_personality raid10_personality = { .name = "raid10", @@ -3552,6 +4534,9 @@ static struct md_personality raid10_personality = .size = raid10_size, .resize = raid10_resize, .takeover = raid10_takeover, + .check_reshape = raid10_check_reshape, + .start_reshape = raid10_start_reshape, + .finish_reshape = raid10_finish_reshape, }; static int __init raid_init(void) diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index 7c615613c381..135b1b0a1554 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -14,32 +14,38 @@ struct mirror_info { struct r10conf { struct mddev *mddev; struct mirror_info *mirrors; - int raid_disks; + struct mirror_info *mirrors_new, *mirrors_old; spinlock_t device_lock; /* geometry */ - int near_copies; /* number of copies laid out + struct geom { + int raid_disks; + int near_copies; /* number of copies laid out * raid0 style */ - int far_copies; /* number of copies laid out + int far_copies; /* number of copies laid out * at large strides across drives */ - int far_offset; /* far_copies are offset by 1 + int far_offset; /* far_copies are offset by 1 * stripe instead of many */ - int copies; /* near_copies * far_copies. - * must be <= raid_disks - */ - sector_t stride; /* distance between far copies. + sector_t stride; /* distance between far copies. * This is size / far_copies unless * far_offset, in which case it is * 1 stripe. */ + int chunk_shift; /* shift from chunks to sectors */ + sector_t chunk_mask; + } prev, geo; + int copies; /* near_copies * far_copies. + * must be <= raid_disks + */ sector_t dev_sectors; /* temp copy of * mddev->dev_sectors */ - - int chunk_shift; /* shift from chunks to sectors */ - sector_t chunk_mask; + sector_t reshape_progress; + sector_t reshape_safe; + unsigned long reshape_checkpoint; + sector_t offset_diff; struct list_head retry_list; /* queue pending writes and submit them on unplug */ @@ -136,6 +142,7 @@ enum r10bio_state { R10BIO_Uptodate, R10BIO_IsSync, R10BIO_IsRecover, + R10BIO_IsReshape, R10BIO_Degraded, /* Set ReadError on bios that experience a read error * so that raid10d knows what to do with them. 
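The struct geom fields added to r10conf above (raid_disks, near_copies, far_copies, chunk_shift, chunk_mask) are what the new last_dev_address()/first_dev_address() helpers earlier in this patch operate on. As a rough illustration of that arithmetic only, not part of the patch, here is a minimal userspace sketch; it assumes a simplified geom with just these fields, replaces the kernel's DIV_ROUND_UP_SECTOR_T()/sector_div() with plain 64-bit division, and the values in main() are made-up example numbers.

#include <stdio.h>
#include <stdint.h>

struct geom {
	int raid_disks;
	int near_copies;
	int far_copies;
	int chunk_shift;		/* shift from chunks to sectors */
	uint64_t chunk_mask;		/* (1 << chunk_shift) - 1 */
};

/* chunk-aligned device address just past any data from the chunk
 * containing array address 's' (mirrors last_dev_address()) */
static uint64_t last_dev_address(uint64_t s, const struct geom *geo)
{
	s = (s | geo->chunk_mask) + 1;			/* round up to the next chunk */
	s >>= geo->chunk_shift;				/* sectors -> chunks */
	s *= geo->near_copies;				/* copies laid out raid0-style */
	s = (s + geo->raid_disks - 1) / geo->raid_disks;	/* chunks per device, rounded up */
	s *= geo->far_copies;				/* each device holds far_copies sets */
	return s << geo->chunk_shift;			/* chunks -> sectors */
}

/* chunk-aligned device address at or before any data from that chunk
 * (mirrors first_dev_address(), which rounds down instead) */
static uint64_t first_dev_address(uint64_t s, const struct geom *geo)
{
	s >>= geo->chunk_shift;
	s *= geo->near_copies;
	s /= geo->raid_disks;
	s *= geo->far_copies;
	return s << geo->chunk_shift;
}

int main(void)
{
	/* made-up example: 4 disks, near_copies=2, far_copies=1,
	 * 64KiB chunks = 128 sectors (chunk_shift 7) */
	struct geom geo = {
		.raid_disks = 4, .near_copies = 2, .far_copies = 1,
		.chunk_shift = 7, .chunk_mask = 127,
	};
	uint64_t s = 1000;

	printf("array sector %llu maps into device window [%llu, %llu)\n",
	       (unsigned long long)s,
	       (unsigned long long)first_dev_address(s, &geo),
	       (unsigned long long)last_dev_address(s, &geo));
	return 0;
}

With these example numbers the sketch reports the window [384, 512), i.e. the reshape code knows that no block of the chunk containing array sector 1000 can live before device offset 384 in the old layout or at/after 512 in the new one.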
@@ -146,5 +153,10 @@ enum r10bio_state { */ R10BIO_MadeGood, R10BIO_WriteError, +/* During a reshape we might be performing IO on the + * 'previous' part of the array, in which case this + * flag is set + */ + R10BIO_Previous, }; #endif diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index f351422938e0..04348d76bb30 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) BUG_ON(!list_empty(&sh->lru)); BUG_ON(atomic_read(&conf->active_stripes)==0); if (test_bit(STRIPE_HANDLE, &sh->state)) { - if (test_bit(STRIPE_DELAYED, &sh->state)) + if (test_bit(STRIPE_DELAYED, &sh->state) && + !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) list_add_tail(&sh->lru, &conf->delayed_list); else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && sh->bm_seq - conf->seq_write > 0) list_add_tail(&sh->lru, &conf->bitmap_list); else { + clear_bit(STRIPE_DELAYED, &sh->state); clear_bit(STRIPE_BIT_DELAY, &sh->state); list_add_tail(&sh->lru, &conf->handle_list); } @@ -488,6 +490,27 @@ get_active_stripe(struct r5conf *conf, sector_t sector, return sh; } +/* Determine if 'data_offset' or 'new_data_offset' should be used + * in this stripe_head. + */ +static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) +{ + sector_t progress = conf->reshape_progress; + /* Need a memory barrier to make sure we see the value + * of conf->generation, or ->data_offset that was set before + * reshape_progress was updated. + */ + smp_rmb(); + if (progress == MaxSector) + return 0; + if (sh->generation == conf->generation - 1) + return 0; + /* We are in a reshape, and this is a new-generation stripe, + * so use new_data_offset. + */ + return 1; +} + static void raid5_end_read_request(struct bio *bi, int error); static void @@ -518,6 +541,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) replace_only = 1; } else continue; + if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) + rw |= REQ_SYNC; bi = &sh->dev[i].req; rbi = &sh->dev[i].rreq; /* For writing to replacement */ @@ -583,6 +608,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) * a chance*/ md_check_recovery(conf->mddev); } + /* + * Because md_wait_for_blocked_rdev + * will dec nr_pending, we must + * increment it first. 
+ */ + atomic_inc(&rdev->nr_pending); md_wait_for_blocked_rdev(rdev, conf->mddev); } else { /* Acknowledged bad block - skip the write */ @@ -603,7 +634,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) __func__, (unsigned long long)sh->sector, bi->bi_rw, i); atomic_inc(&sh->count); - bi->bi_sector = sh->sector + rdev->data_offset; + if (use_new_offset(conf, sh)) + bi->bi_sector = (sh->sector + + rdev->new_data_offset); + else + bi->bi_sector = (sh->sector + + rdev->data_offset); bi->bi_flags = 1 << BIO_UPTODATE; bi->bi_idx = 0; bi->bi_io_vec[0].bv_len = STRIPE_SIZE; @@ -627,7 +663,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) __func__, (unsigned long long)sh->sector, rbi->bi_rw, i); atomic_inc(&sh->count); - rbi->bi_sector = sh->sector + rrdev->data_offset; + if (use_new_offset(conf, sh)) + rbi->bi_sector = (sh->sector + + rrdev->new_data_offset); + else + rbi->bi_sector = (sh->sector + + rrdev->data_offset); rbi->bi_flags = 1 << BIO_UPTODATE; rbi->bi_idx = 0; rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; @@ -1114,6 +1155,8 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) dev->sector + STRIPE_SECTORS) { if (wbi->bi_rw & REQ_FUA) set_bit(R5_WantFUA, &dev->flags); + if (wbi->bi_rw & REQ_SYNC) + set_bit(R5_SyncIO, &dev->flags); tx = async_copy_data(1, wbi, dev->page, dev->sector, tx); wbi = r5_next_bio(wbi, dev->sector); @@ -1131,13 +1174,15 @@ static void ops_complete_reconstruct(void *stripe_head_ref) int pd_idx = sh->pd_idx; int qd_idx = sh->qd_idx; int i; - bool fua = false; + bool fua = false, sync = false; pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); - for (i = disks; i--; ) + for (i = disks; i--; ) { fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); + sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); + } for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; @@ -1146,6 +1191,8 @@ static void ops_complete_reconstruct(void *stripe_head_ref) set_bit(R5_UPTODATE, &dev->flags); if (fua) set_bit(R5_WantFUA, &dev->flags); + if (sync) + set_bit(R5_SyncIO, &dev->flags); } } @@ -1648,7 +1695,7 @@ static void raid5_end_read_request(struct bio * bi, int error) int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); char b[BDEVNAME_SIZE]; struct md_rdev *rdev = NULL; - + sector_t s; for (i=0 ; i<disks; i++) if (bi == &sh->dev[i].req) @@ -1671,6 +1718,10 @@ static void raid5_end_read_request(struct bio * bi, int error) if (!rdev) rdev = conf->disks[i].rdev; + if (use_new_offset(conf, sh)) + s = sh->sector + rdev->new_data_offset; + else + s = sh->sector + rdev->data_offset; if (uptodate) { set_bit(R5_UPTODATE, &sh->dev[i].flags); if (test_bit(R5_ReadError, &sh->dev[i].flags)) { @@ -1683,8 +1734,7 @@ static void raid5_end_read_request(struct bio * bi, int error) "md/raid:%s: read error corrected" " (%lu sectors at %llu on %s)\n", mdname(conf->mddev), STRIPE_SECTORS, - (unsigned long long)(sh->sector - + rdev->data_offset), + (unsigned long long)s, bdevname(rdev->bdev, b)); atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); clear_bit(R5_ReadError, &sh->dev[i].flags); @@ -1695,6 +1745,7 @@ static void raid5_end_read_request(struct bio * bi, int error) } else { const char *bdn = bdevname(rdev->bdev, b); int retry = 0; + int set_bad = 0; clear_bit(R5_UPTODATE, &sh->dev[i].flags); atomic_inc(&rdev->read_errors); @@ -1704,29 +1755,28 @@ static void raid5_end_read_request(struct bio * bi, int error) "md/raid:%s: read error on replacement device " "(sector %llu on %s).\n", 
mdname(conf->mddev), - (unsigned long long)(sh->sector - + rdev->data_offset), + (unsigned long long)s, bdn); - else if (conf->mddev->degraded >= conf->max_degraded) + else if (conf->mddev->degraded >= conf->max_degraded) { + set_bad = 1; printk_ratelimited( KERN_WARNING "md/raid:%s: read error not correctable " "(sector %llu on %s).\n", mdname(conf->mddev), - (unsigned long long)(sh->sector - + rdev->data_offset), + (unsigned long long)s, bdn); - else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) + } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { /* Oh, no!!! */ + set_bad = 1; printk_ratelimited( KERN_WARNING "md/raid:%s: read error NOT corrected!! " "(sector %llu on %s).\n", mdname(conf->mddev), - (unsigned long long)(sh->sector - + rdev->data_offset), + (unsigned long long)s, bdn); - else if (atomic_read(&rdev->read_errors) + } else if (atomic_read(&rdev->read_errors) > conf->max_nr_stripes) printk(KERN_WARNING "md/raid:%s: Too many read errors, failing device %s.\n", @@ -1738,7 +1788,11 @@ static void raid5_end_read_request(struct bio * bi, int error) else { clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); - md_error(conf->mddev, rdev); + if (!(set_bad + && test_bit(In_sync, &rdev->flags) + && rdev_set_badblocks( + rdev, sh->sector, STRIPE_SECTORS, 0))) + md_error(conf->mddev, rdev); } } rdev_dec_pending(rdev, conf->mddev); @@ -3543,8 +3597,18 @@ static void handle_stripe(struct stripe_head *sh) finish: /* wait for this device to become unblocked */ - if (conf->mddev->external && unlikely(s.blocked_rdev)) - md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); + if (unlikely(s.blocked_rdev)) { + if (conf->mddev->external) + md_wait_for_blocked_rdev(s.blocked_rdev, + conf->mddev); + else + /* Internal metadata will immediately + * be written by raid5d, so we don't + * need to wait here. 
+ */ + rdev_dec_pending(s.blocked_rdev, + conf->mddev); + } if (s.handle_bad_blocks) for (i = disks; i--; ) { @@ -3561,7 +3625,7 @@ finish: if (test_and_clear_bit(R5_MadeGood, &dev->flags)) { rdev = conf->disks[i].rdev; rdev_clear_badblocks(rdev, sh->sector, - STRIPE_SECTORS); + STRIPE_SECTORS, 0); rdev_dec_pending(rdev, conf->mddev); } if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) { @@ -3570,7 +3634,7 @@ finish: /* rdev have been moved down */ rdev = conf->disks[i].rdev; rdev_clear_badblocks(rdev, sh->sector, - STRIPE_SECTORS); + STRIPE_SECTORS, 0); rdev_dec_pending(rdev, conf->mddev); } } @@ -3842,7 +3906,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) raid_bio->bi_next = (void*)rdev; align_bi->bi_bdev = rdev->bdev; align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); - align_bi->bi_sector += rdev->data_offset; if (!bio_fits_rdev(align_bi) || is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9, @@ -3853,6 +3916,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) return 0; } + /* No reshape active, so we can trust rdev->data_offset */ + align_bi->bi_sector += rdev->data_offset; + spin_lock_irq(&conf->device_lock); wait_event_lock_irq(conf->wait_for_stripe, conf->quiesce == 0, @@ -3931,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi) struct stripe_head *sh; const int rw = bio_data_dir(bi); int remaining; - int plugged; if (unlikely(bi->bi_rw & REQ_FLUSH)) { md_flush_request(mddev, bi); @@ -3950,15 +4015,12 @@ static void make_request(struct mddev *mddev, struct bio * bi) bi->bi_next = NULL; bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ - plugged = mddev_check_plugged(mddev); for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { DEFINE_WAIT(w); - int disks, data_disks; int previous; retry: previous = 0; - disks = conf->raid_disks; prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); if (unlikely(conf->reshape_progress != MaxSector)) { /* spinlock is needed as reshape_progress may be @@ -3970,13 +4032,12 @@ static void make_request(struct mddev *mddev, struct bio * bi) * to check again. */ spin_lock_irq(&conf->device_lock); - if (mddev->delta_disks < 0 + if (mddev->reshape_backwards ? logical_sector < conf->reshape_progress : logical_sector >= conf->reshape_progress) { - disks = conf->previous_raid_disks; previous = 1; } else { - if (mddev->delta_disks < 0 + if (mddev->reshape_backwards ? logical_sector < conf->reshape_safe : logical_sector >= conf->reshape_safe) { spin_unlock_irq(&conf->device_lock); @@ -3986,7 +4047,6 @@ static void make_request(struct mddev *mddev, struct bio * bi) } spin_unlock_irq(&conf->device_lock); } - data_disks = disks - conf->max_degraded; new_sector = raid5_compute_sector(conf, logical_sector, previous, @@ -4009,7 +4069,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) */ int must_retry = 0; spin_lock_irq(&conf->device_lock); - if (mddev->delta_disks < 0 + if (mddev->reshape_backwards ? 
logical_sector >= conf->reshape_progress : logical_sector < conf->reshape_progress) /* mismatch, need to try again */ @@ -4056,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) if ((bi->bi_rw & REQ_SYNC) && !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) atomic_inc(&conf->preread_active_stripes); + mddev_check_plugged(mddev); release_stripe(sh); } else { /* cannot get stripe for read-ahead, just give-up */ @@ -4063,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) finish_wait(&conf->wait_for_overlap, &w); break; } - } - if (!plugged) - md_wakeup_thread(mddev->thread); spin_lock_irq(&conf->device_lock); remaining = raid5_dec_bi_phys_segments(bi); @@ -4108,11 +4166,11 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk if (sector_nr == 0) { /* If restarting in the middle, skip the initial sectors */ - if (mddev->delta_disks < 0 && + if (mddev->reshape_backwards && conf->reshape_progress < raid5_size(mddev, 0, 0)) { sector_nr = raid5_size(mddev, 0, 0) - conf->reshape_progress; - } else if (mddev->delta_disks >= 0 && + } else if (!mddev->reshape_backwards && conf->reshape_progress > 0) sector_nr = conf->reshape_progress; sector_div(sector_nr, new_data_disks); @@ -4133,13 +4191,11 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk else reshape_sectors = mddev->chunk_sectors; - /* we update the metadata when there is more than 3Meg - * in the block range (that is rather arbitrary, should - * probably be time based) or when the data about to be - * copied would over-write the source of the data at - * the front of the range. - * i.e. one new_stripe along from reshape_progress new_maps - * to after where reshape_safe old_maps to + /* We update the metadata at least every 10 seconds, or when + * the data about to be copied would over-write the source of + * the data at the front of the range. i.e. one new_stripe + * along from reshape_progress new_maps to after where + * reshape_safe old_maps to */ writepos = conf->reshape_progress; sector_div(writepos, new_data_disks); @@ -4147,7 +4203,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk sector_div(readpos, data_disks); safepos = conf->reshape_safe; sector_div(safepos, data_disks); - if (mddev->delta_disks < 0) { + if (mddev->reshape_backwards) { writepos -= min_t(sector_t, reshape_sectors, writepos); readpos += reshape_sectors; safepos += reshape_sectors; @@ -4157,11 +4213,29 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk safepos -= min_t(sector_t, reshape_sectors, safepos); } + /* Having calculated the 'writepos' possibly use it + * to set 'stripe_addr' which is where we will write to. + */ + if (mddev->reshape_backwards) { + BUG_ON(conf->reshape_progress == 0); + stripe_addr = writepos; + BUG_ON((mddev->dev_sectors & + ~((sector_t)reshape_sectors - 1)) + - reshape_sectors - stripe_addr + != sector_nr); + } else { + BUG_ON(writepos != sector_nr + reshape_sectors); + stripe_addr = sector_nr; + } + /* 'writepos' is the most advanced device address we might write. * 'readpos' is the least advanced device address we might read. * 'safepos' is the least address recorded in the metadata as having * been reshaped. - * If 'readpos' is behind 'writepos', then there is no way that we can + * If there is a min_offset_diff, these are adjusted either by + * increasing the safepos/readpos if diff is negative, or + * increasing writepos if diff is positive. 
+ * If 'readpos' is then behind 'writepos', there is no way that we can * ensure safety in the face of a crash - that must be done by userspace * making a backup of the data. So in that case there is no particular * rush to update metadata. @@ -4174,7 +4248,13 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk * Maybe that number should be configurable, but I'm not sure it is * worth it.... maybe it could be a multiple of safemode_delay??? */ - if ((mddev->delta_disks < 0 + if (conf->min_offset_diff < 0) { + safepos += -conf->min_offset_diff; + readpos += -conf->min_offset_diff; + } else + writepos += conf->min_offset_diff; + + if ((mddev->reshape_backwards ? (safepos > writepos && readpos < writepos) : (safepos < writepos && readpos > writepos)) || time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { @@ -4195,17 +4275,6 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk sysfs_notify(&mddev->kobj, NULL, "sync_completed"); } - if (mddev->delta_disks < 0) { - BUG_ON(conf->reshape_progress == 0); - stripe_addr = writepos; - BUG_ON((mddev->dev_sectors & - ~((sector_t)reshape_sectors - 1)) - - reshape_sectors - stripe_addr - != sector_nr); - } else { - BUG_ON(writepos != sector_nr + reshape_sectors); - stripe_addr = sector_nr; - } INIT_LIST_HEAD(&stripes); for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { int j; @@ -4239,7 +4308,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk list_add(&sh->lru, &stripes); } spin_lock_irq(&conf->device_lock); - if (mddev->delta_disks < 0) + if (mddev->reshape_backwards) conf->reshape_progress -= reshape_sectors * new_data_disks; else conf->reshape_progress += reshape_sectors * new_data_disks; @@ -4776,6 +4845,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) int raid_disk, memory, max_disks; struct md_rdev *rdev; struct disk_info *disk; + char pers_name[6]; if (mddev->new_level != 5 && mddev->new_level != 4 @@ -4899,7 +4969,8 @@ static struct r5conf *setup_conf(struct mddev *mddev) printk(KERN_INFO "md/raid:%s: allocated %dkB\n", mdname(mddev), memory); - conf->thread = md_register_thread(raid5d, mddev, NULL); + sprintf(pers_name, "raid%d", mddev->new_level); + conf->thread = md_register_thread(raid5d, mddev, pers_name); if (!conf->thread) { printk(KERN_ERR "md/raid:%s: couldn't allocate thread.\n", @@ -4952,16 +5023,42 @@ static int run(struct mddev *mddev) struct md_rdev *rdev; sector_t reshape_offset = 0; int i; + long long min_offset_diff = 0; + int first = 1; if (mddev->recovery_cp != MaxSector) printk(KERN_NOTICE "md/raid:%s: not clean" " -- starting background reconstruction\n", mdname(mddev)); + + rdev_for_each(rdev, mddev) { + long long diff; + if (rdev->raid_disk < 0) + continue; + diff = (rdev->new_data_offset - rdev->data_offset); + if (first) { + min_offset_diff = diff; + first = 0; + } else if (mddev->reshape_backwards && + diff < min_offset_diff) + min_offset_diff = diff; + else if (!mddev->reshape_backwards && + diff > min_offset_diff) + min_offset_diff = diff; + } + if (mddev->reshape_position != MaxSector) { /* Check that we can continue the reshape. - * Currently only disks can change, it must - * increase, and we must be past the point where - * a stripe over-writes itself + * Difficulties arise if the stripe we would write to + * next is at or after the stripe we would read from next. 
+ * For a reshape that changes the number of devices, this + * is only possible for a very short time, and mdadm makes + * sure that time appears to have passed before assembling + * the array. So we fail if that time hasn't passed. + * For a reshape that keeps the number of devices the same, + * mdadm must be monitoring the reshape and keeping the + * critical areas read-only and backed up. It will start + * the array in read-only mode, so we check for that. */ sector_t here_new, here_old; int old_disks; @@ -4993,26 +5090,34 @@ static int run(struct mddev *mddev) /* here_old is the first stripe that we might need to read * from */ if (mddev->delta_disks == 0) { + if ((here_new * mddev->new_chunk_sectors != + here_old * mddev->chunk_sectors)) { + printk(KERN_ERR "md/raid:%s: reshape position is" + " confused - aborting\n", mdname(mddev)); + return -EINVAL; + } /* We cannot be sure it is safe to start an in-place - * reshape. It is only safe if user-space if monitoring + * reshape. It is only safe if user-space is monitoring * and taking constant backups. * mdadm always starts a situation like this in * readonly mode so it can take control before * allowing any writes. So just check for that. */ - if ((here_new * mddev->new_chunk_sectors != - here_old * mddev->chunk_sectors) || - mddev->ro == 0) { - printk(KERN_ERR "md/raid:%s: in-place reshape must be started" - " in read-only mode - aborting\n", + if (abs(min_offset_diff) >= mddev->chunk_sectors && + abs(min_offset_diff) >= mddev->new_chunk_sectors) + /* not really in-place - so OK */; + else if (mddev->ro == 0) { + printk(KERN_ERR "md/raid:%s: in-place reshape " + "must be started in read-only mode " + "- aborting\n", mdname(mddev)); return -EINVAL; } - } else if (mddev->delta_disks < 0 - ? (here_new * mddev->new_chunk_sectors <= + } else if (mddev->reshape_backwards + ?
(here_new * mddev->new_chunk_sectors + min_offset_diff <= here_old * mddev->chunk_sectors) : (here_new * mddev->new_chunk_sectors >= - here_old * mddev->chunk_sectors)) { + here_old * mddev->chunk_sectors + (-min_offset_diff))) { /* Reading from the same stripe as writing to - bad */ printk(KERN_ERR "md/raid:%s: reshape_position too early for " "auto-recovery - aborting.\n", @@ -5037,6 +5142,7 @@ static int run(struct mddev *mddev) if (IS_ERR(conf)) return PTR_ERR(conf); + conf->min_offset_diff = min_offset_diff; mddev->thread = conf->thread; conf->thread = NULL; mddev->private = conf; @@ -5182,9 +5288,12 @@ static int run(struct mddev *mddev) blk_queue_io_opt(mddev->queue, chunk_size * (conf->raid_disks - conf->max_degraded)); - rdev_for_each(rdev, mddev) + rdev_for_each(rdev, mddev) { disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->new_data_offset << 9); + } } return 0; @@ -5380,10 +5489,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) if (rdev->saved_raid_disk >= 0 && rdev->saved_raid_disk >= first && conf->disks[rdev->saved_raid_disk].rdev == NULL) - disk = rdev->saved_raid_disk; - else - disk = first; - for ( ; disk <= last ; disk++) { + first = rdev->saved_raid_disk; + + for (disk = first; disk <= last; disk++) { p = conf->disks + disk; if (p->rdev == NULL) { clear_bit(In_sync, &rdev->flags); @@ -5392,8 +5500,11 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) if (rdev->saved_raid_disk != disk) conf->fullsync = 1; rcu_assign_pointer(p->rdev, rdev); - break; + goto out; } + } + for (disk = first; disk <= last; disk++) { + p = conf->disks + disk; if (test_bit(WantReplacement, &p->rdev->flags) && p->replacement == NULL) { clear_bit(In_sync, &rdev->flags); @@ -5405,6 +5516,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) break; } } +out: print_raid5_conf(conf); return err; } @@ -5418,12 +5530,18 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors) * any io in the removed space completes, but it hardly seems * worth it. 
*/ + sector_t newsize; sectors &= ~((sector_t)mddev->chunk_sectors - 1); - md_set_array_sectors(mddev, raid5_size(mddev, sectors, - mddev->raid_disks)); - if (mddev->array_sectors > - raid5_size(mddev, sectors, mddev->raid_disks)) + newsize = raid5_size(mddev, sectors, mddev->raid_disks); + if (mddev->external_size && + mddev->array_sectors > newsize) return -EINVAL; + if (mddev->bitmap) { + int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0); + if (ret) + return ret; + } + md_set_array_sectors(mddev, newsize); set_capacity(mddev->gendisk, mddev->array_sectors); revalidate_disk(mddev->gendisk); if (sectors > mddev->dev_sectors && @@ -5468,9 +5586,6 @@ static int check_reshape(struct mddev *mddev) mddev->new_layout == mddev->layout && mddev->new_chunk_sectors == mddev->chunk_sectors) return 0; /* nothing to do */ - if (mddev->bitmap) - /* Cannot grow a bitmap yet */ - return -EBUSY; if (has_failed(conf)) return -EINVAL; if (mddev->delta_disks < 0) { @@ -5505,10 +5620,14 @@ static int raid5_start_reshape(struct mddev *mddev) if (!check_stripe_cache(mddev)) return -ENOSPC; - rdev_for_each(rdev, mddev) + if (has_failed(conf)) + return -EINVAL; + + rdev_for_each(rdev, mddev) { if (!test_bit(In_sync, &rdev->flags) && !test_bit(Faulty, &rdev->flags)) spares++; + } if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) /* Not enough devices even to make a degraded array @@ -5535,12 +5654,16 @@ static int raid5_start_reshape(struct mddev *mddev) conf->chunk_sectors = mddev->new_chunk_sectors; conf->prev_algo = conf->algorithm; conf->algorithm = mddev->new_layout; - if (mddev->delta_disks < 0) + conf->generation++; + /* Code that selects data_offset needs to see the generation update + * if reshape_progress has been set - so a memory barrier needed. + */ + smp_mb(); + if (mddev->reshape_backwards) conf->reshape_progress = raid5_size(mddev, 0, 0); else conf->reshape_progress = 0; conf->reshape_safe = conf->reshape_progress; - conf->generation++; spin_unlock_irq(&conf->device_lock); /* Add some new drives, as many as will fit. 
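The raid5 reshape_request() hunks above fold conf->min_offset_diff into the writepos/readpos/safepos comparison before deciding whether the reshape position must be checkpointed to the metadata. The following standalone sketch shows just that decision; need_metadata_update() is a hypothetical helper, all positions are device addresses in sectors, and the kernel additionally forces an update every 10 seconds via time_after(), which is omitted here.

#include <stdbool.h>
#include <stdio.h>

static bool need_metadata_update(unsigned long long writepos,
				 unsigned long long readpos,
				 unsigned long long safepos,
				 long long min_offset_diff,
				 bool reshape_backwards)
{
	/* A negative min_offset_diff gives the read side extra slack,
	 * a positive one gives the write side extra slack. */
	if (min_offset_diff < 0) {
		safepos += -min_offset_diff;
		readpos += -min_offset_diff;
	} else {
		writepos += min_offset_diff;
	}

	/* Checkpoint when the next write could pass what the on-disk
	 * metadata still records as safe to read. */
	return reshape_backwards ? (safepos > writepos && readpos < writepos)
				 : (safepos < writepos && readpos > writepos);
}

int main(void)
{
	/* growing forwards: about to write at 4096 while the metadata only
	 * records 1024 as safe and reads are still ahead at 5120 */
	printf("checkpoint needed: %d\n",
	       need_metadata_update(4096, 5120, 1024, 0, false));
	return 0;
}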
@@ -5592,6 +5715,9 @@ static int raid5_start_reshape(struct mddev *mddev) mddev->recovery = 0; spin_lock_irq(&conf->device_lock); mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; + rdev_for_each(rdev, mddev) + rdev->new_data_offset = rdev->data_offset; + smp_wmb(); conf->reshape_progress = MaxSector; mddev->reshape_position = MaxSector; spin_unlock_irq(&conf->device_lock); @@ -5610,9 +5736,13 @@ static void end_reshape(struct r5conf *conf) { if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { + struct md_rdev *rdev; spin_lock_irq(&conf->device_lock); conf->previous_raid_disks = conf->raid_disks; + rdev_for_each(rdev, conf->mddev) + rdev->data_offset = rdev->new_data_offset; + smp_wmb(); conf->reshape_progress = MaxSector; spin_unlock_irq(&conf->device_lock); wake_up(&conf->wait_for_overlap); @@ -5652,17 +5782,18 @@ static void raid5_finish_reshape(struct mddev *mddev) d < conf->raid_disks - mddev->delta_disks; d++) { struct md_rdev *rdev = conf->disks[d].rdev; - if (rdev && - raid5_remove_disk(mddev, rdev) == 0) { - sysfs_unlink_rdev(mddev, rdev); - rdev->raid_disk = -1; - } + if (rdev) + clear_bit(In_sync, &rdev->flags); + rdev = conf->disks[d].replacement; + if (rdev) + clear_bit(In_sync, &rdev->flags); } } mddev->layout = conf->algorithm; mddev->chunk_sectors = conf->chunk_sectors; mddev->reshape_position = MaxSector; mddev->delta_disks = 0; + mddev->reshape_backwards = 0; } } diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 8d8e13934a48..2164021f3b5f 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -285,6 +285,7 @@ enum r5dev_flags { */ R5_Wantdrain, /* dev->towrite needs to be drained */ R5_WantFUA, /* Write should be FUA */ + R5_SyncIO, /* The IO is sync */ R5_WriteError, /* got a write error - need to record it */ R5_MadeGood, /* A bad block has been fixed by writing to it */ R5_ReadRepl, /* Will/did read from replacement rather than orig */ @@ -385,6 +386,12 @@ struct r5conf { short generation; /* increments with every reshape */ unsigned long reshape_checkpoint; /* Time we last updated * metadata */ + long long min_offset_diff; /* minimum difference between + * data_offset and + * new_data_offset across all + * devices. May be negative, + * but is closest to zero. + */ struct list_head handle_list; /* stripes needing handling */ struct list_head hold_list; /* preread ready stripes */ |
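The min_offset_diff field documented in the final raid5.h hunk is seeded by the rdev loop added to run() earlier in this patch. A userspace sketch of that selection is below; struct fake_rdev and pick_min_offset_diff() are illustrative stand-ins for the kernel's struct md_rdev and rdev_for_each() walk, and the offsets in main() are invented for the example.

#include <stdio.h>

/* only the fields the loop in run() looks at are modelled here */
struct fake_rdev {
	int raid_disk;			/* < 0: not a member, skipped */
	long long data_offset;		/* current data start, in sectors */
	long long new_data_offset;	/* data start after the reshape */
};

static long long pick_min_offset_diff(const struct fake_rdev *rdev, int n,
				      int reshape_backwards)
{
	long long min_diff = 0;
	int first = 1;

	for (int i = 0; i < n; i++) {
		long long diff;

		if (rdev[i].raid_disk < 0)
			continue;
		diff = rdev[i].new_data_offset - rdev[i].data_offset;
		if (first) {
			min_diff = diff;
			first = 0;
		} else if (reshape_backwards && diff < min_diff) {
			min_diff = diff;	/* backwards: keep the smallest */
		} else if (!reshape_backwards && diff > min_diff) {
			min_diff = diff;	/* forwards: keep the largest */
		}
	}
	/* per the raid5.h comment, the result may be negative but is the
	 * value closest to zero for the chosen reshape direction */
	return min_diff;
}

int main(void)
{
	/* forwards reshape relocating data to lower offsets, so every
	 * per-device diff is negative; the result is the one closest to zero */
	struct fake_rdev rdevs[] = {
		{ .raid_disk = 0, .data_offset = 4096, .new_data_offset = 2048 },
		{ .raid_disk = 1, .data_offset = 4096, .new_data_offset = 1024 },
		{ .raid_disk = -1 },	/* spare, ignored */
	};

	printf("min_offset_diff = %lld sectors\n",
	       pick_min_offset_diff(rdevs, 3, 0));
	return 0;
}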