Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--  drivers/md/md.c   76
1 file changed, 13 insertions(+), 63 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d5ab4493c8be..3f6203a4c7ea 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -498,61 +498,13 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
}
EXPORT_SYMBOL(md_flush_request);
-/* Support for plugging.
- * This mirrors the plugging support in request_queue, but does not
- * require having a whole queue or request structures.
- * We allocate an md_plug_cb for each md device and each thread it gets
- * plugged on. This links tot the private plug_handle structure in the
- * personality data where we keep a count of the number of outstanding
- * plugs so other code can see if a plug is active.
- */
-struct md_plug_cb {
- struct blk_plug_cb cb;
- struct mddev *mddev;
-};
-
-static void plugger_unplug(struct blk_plug_cb *cb)
+void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
- struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
- if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
- md_wakeup_thread(mdcb->mddev->thread);
- kfree(mdcb);
-}
-
-/* Check that an unplug wakeup will come shortly.
- * If not, wakeup the md thread immediately
- */
-int mddev_check_plugged(struct mddev *mddev)
-{
- struct blk_plug *plug = current->plug;
- struct md_plug_cb *mdcb;
-
- if (!plug)
- return 0;
-
- list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
- if (mdcb->cb.callback == plugger_unplug &&
- mdcb->mddev == mddev) {
- /* Already on the list, move to top */
- if (mdcb != list_first_entry(&plug->cb_list,
- struct md_plug_cb,
- cb.list))
- list_move(&mdcb->cb.list, &plug->cb_list);
- return 1;
- }
- }
- /* Not currently on the callback list */
- mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
- if (!mdcb)
- return 0;
-
- mdcb->mddev = mddev;
- mdcb->cb.callback = plugger_unplug;
- atomic_inc(&mddev->plug_cnt);
- list_add(&mdcb->cb.list, &plug->cb_list);
- return 1;
+ struct mddev *mddev = cb->data;
+ md_wakeup_thread(mddev->thread);
+ kfree(cb);
}
-EXPORT_SYMBOL_GPL(mddev_check_plugged);
+EXPORT_SYMBOL(md_unplug);
static inline struct mddev *mddev_get(struct mddev *mddev)
{
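The md-private plug tracking above is replaced by the block layer's generic
plug callback API: md_unplug() is now an ordinary blk_plug_cb_fn that just
wakes the md thread, and the per-mddev plug_cnt counter goes away. Callers
that used mddev_check_plugged() can presumably be rebuilt on top of
blk_check_plugged(); a minimal sketch of such a helper follows (its exact
form and placement in md.h are an assumption, not part of this hunk):

/* Sketch only: register (or find) an unplug callback for this mddev on the
 * current task's plug list.  blk_check_plugged() allocates the blk_plug_cb
 * and stores @mddev in cb->data, which md_unplug() reads back above before
 * waking the md thread.  Returns NULL when no plug is active or the
 * allocation fails.
 */
static inline struct blk_plug_cb *mddev_check_plugged(struct mddev *mddev)
{
	return blk_check_plugged(md_unplug, mddev, sizeof(struct blk_plug_cb));
}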
@@ -602,7 +554,6 @@ void mddev_init(struct mddev *mddev)
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
- atomic_set(&mddev->plug_cnt, 0);
spin_lock_init(&mddev->write_lock);
atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
@@ -1157,8 +1108,11 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
ret = 0;
}
rdev->sectors = rdev->sb_start;
- /* Limit to 4TB as metadata cannot record more than that */
- if (rdev->sectors >= (2ULL << 32))
+ /* Limit to 4TB as metadata cannot record more than that.
+ * (not needed for Linear and RAID0 as metadata doesn't
+ * record this size)
+ */
+ if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
rdev->sectors = (2ULL << 32) - 2;
if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
@@ -1449,7 +1403,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
/* Limit to 4TB as metadata cannot record more than that.
* 4TB == 2^32 KB, or 2*2^32 sectors.
*/
- if (num_sectors >= (2ULL << 32))
+ if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
num_sectors = (2ULL << 32) - 2;
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
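Both 4TB clamps encode the same limit: a 0.90 superblock stores the device
size as a 32-bit count of 1KB blocks, so 2^32 KB == 2 * 2^32 sectors is the
first size it cannot represent, and the clamp stops two sectors (one KB)
short of that.  A stand-alone sanity check of the arithmetic in the comment
above (ordinary userspace C, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 2ULL << 32 sectors of 512 bytes, as used in the clamp above */
	uint64_t cap_sectors = 2ULL << 32;
	uint64_t cap_bytes = cap_sectors * 512;

	printf("%llu sectors = %llu bytes = %llu TiB\n",
	       (unsigned long long)cap_sectors,
	       (unsigned long long)cap_bytes,
	       (unsigned long long)(cap_bytes >> 40));	/* prints 4 */
	return 0;
}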
@@ -3942,17 +3896,13 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
break;
case clear:
/* stopping an active array */
- if (atomic_read(&mddev->openers) > 0)
- return -EBUSY;
err = do_md_stop(mddev, 0, NULL);
break;
case inactive:
/* stopping an active array */
- if (mddev->pers) {
- if (atomic_read(&mddev->openers) > 0)
- return -EBUSY;
+ if (mddev->pers)
err = do_md_stop(mddev, 2, NULL);
- } else
+ else
err = 0; /* already inactive */
break;
case suspended: