Diffstat (limited to 'drivers/md')
36 files changed, 2065 insertions, 326 deletions
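A recurring change in this series is the conversion of the bcache and dm-bufio shrinkers from the old single ->shrink() callback to the split ->count_objects()/->scan_objects() interface, in which "no progress possible" is reported as SHRINK_STOP rather than -1 and the scan callback returns the number of objects actually freed. As a reading aid, here is a minimal sketch of that pattern; it is not taken from this patch, and the my_* helpers are hypothetical:

    #include <linux/shrinker.h>

    /* Report how many objects could be freed right now; unlike the old
     * API, this callback is never asked to do any freeing itself. */
    static unsigned long my_count_objects(struct shrinker *shrink,
                                          struct shrink_control *sc)
    {
            return my_nr_cached();          /* hypothetical helper */
    }

    /* Try to free up to sc->nr_to_scan objects and report how many were
     * actually freed. SHRINK_STOP (not -1) tells the VM to give up. */
    static unsigned long my_scan_objects(struct shrinker *shrink,
                                         struct shrink_control *sc)
    {
            unsigned long freed = 0;

            if (!my_trylock())              /* hypothetical helper */
                    return SHRINK_STOP;

            while (freed < sc->nr_to_scan && my_free_one_object())
                    freed++;

            my_unlock();
            return freed;
    }

    static struct shrinker my_shrinker = {
            .count_objects = my_count_objects,
            .scan_objects  = my_scan_objects,
            .seeks         = DEFAULT_SEEKS,
    };

Registration is unchanged: register_shrinker(&my_shrinker) at init and unregister_shrinker() on teardown, as the bch_btree_cache_alloc() and dm_bufio_client_create() hunks below show.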
diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 5ef78efc27f2..2acc43fe0229 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -3,7 +3,7 @@ # dm-mod-y += dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \ - dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o + dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o dm-multipath-y += dm-path-selector.o dm-mpath.o dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \ dm-snap-persistent.o diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index b39f6f0b45f2..0f12382aa35d 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -498,7 +498,7 @@ struct cached_dev { */ atomic_t has_dirty; - struct ratelimit writeback_rate; + struct bch_ratelimit writeback_rate; struct delayed_work writeback_rate_update; /* @@ -507,10 +507,9 @@ struct cached_dev { */ sector_t last_read; - /* Number of writeback bios in flight */ - atomic_t in_flight; + /* Limit number of writeback bios in flight */ + struct semaphore in_flight; struct closure_with_timer writeback; - struct closure_waitlist writeback_wait; struct keybuf writeback_keys; diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 8010eed06a51..22d1ae72c282 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -926,28 +926,45 @@ struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search) /* Mergesort */ +static void sort_key_next(struct btree_iter *iter, + struct btree_iter_set *i) +{ + i->k = bkey_next(i->k); + + if (i->k == i->end) + *i = iter->data[--iter->used]; +} + static void btree_sort_fixup(struct btree_iter *iter) { while (iter->used > 1) { struct btree_iter_set *top = iter->data, *i = top + 1; - struct bkey *k; if (iter->used > 2 && btree_iter_cmp(i[0], i[1])) i++; - for (k = i->k; - k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0; - k = bkey_next(k)) - if (top->k > i->k) - __bch_cut_front(top->k, k); - else if (KEY_SIZE(k)) - bch_cut_back(&START_KEY(k), top->k); - - if (top->k < i->k || k == i->k) + if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0) break; - heap_sift(iter, i - top, btree_iter_cmp); + if (!KEY_SIZE(i->k)) { + sort_key_next(iter, i); + heap_sift(iter, i - top, btree_iter_cmp); + continue; + } + + if (top->k > i->k) { + if (bkey_cmp(top->k, i->k) >= 0) + sort_key_next(iter, i); + else + bch_cut_front(top->k, i->k); + + heap_sift(iter, i - top, btree_iter_cmp); + } else { + /* can't happen because of comparison func */ + BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k))); + bch_cut_back(&START_KEY(i->k), top->k); + } } } diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index ee372884c405..f42fc7ed9cd6 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -255,7 +255,7 @@ void bch_btree_node_read(struct btree *b) return; err: - bch_cache_set_error(b->c, "io error reading bucket %lu", + bch_cache_set_error(b->c, "io error reading bucket %zu", PTR_BUCKET_NR(b->c, &b->key, 0)); } @@ -597,27 +597,22 @@ static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order) return 0; } -static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc) +static unsigned long bch_mca_scan(struct shrinker *shrink, + struct shrink_control *sc) { struct cache_set *c = container_of(shrink, struct cache_set, shrink); struct btree *b, *t; unsigned long i, nr = sc->nr_to_scan; + unsigned long freed = 0; if (c->shrinker_disabled) - return 0; + return SHRINK_STOP; if (c->try_harder) - return 0; - - /* - * If nr == 0, we're supposed 
to return the number of items we have - * cached. Not allowed to return -1. - */ - if (!nr) - return mca_can_free(c) * c->btree_pages; + return SHRINK_STOP; /* Return -1 if we can't do anything right now */ - if (sc->gfp_mask & __GFP_WAIT) + if (sc->gfp_mask & __GFP_IO) mutex_lock(&c->bucket_lock); else if (!mutex_trylock(&c->bucket_lock)) return -1; @@ -634,14 +629,14 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc) i = 0; list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) { - if (!nr) + if (freed >= nr) break; if (++i > 3 && !mca_reap(b, NULL, 0)) { mca_data_free(b); rw_unlock(true, b); - --nr; + freed++; } } @@ -652,7 +647,7 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc) if (list_empty(&c->btree_cache)) goto out; - for (i = 0; nr && i < c->bucket_cache_used; i++) { + for (i = 0; (nr--) && i < c->bucket_cache_used; i++) { b = list_first_entry(&c->btree_cache, struct btree, list); list_rotate_left(&c->btree_cache); @@ -661,14 +656,27 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc) mca_bucket_free(b); mca_data_free(b); rw_unlock(true, b); - --nr; + freed++; } else b->accessed = 0; } out: - nr = mca_can_free(c) * c->btree_pages; mutex_unlock(&c->bucket_lock); - return nr; + return freed; +} + +static unsigned long bch_mca_count(struct shrinker *shrink, + struct shrink_control *sc) +{ + struct cache_set *c = container_of(shrink, struct cache_set, shrink); + + if (c->shrinker_disabled) + return 0; + + if (c->try_harder) + return 0; + + return mca_can_free(c) * c->btree_pages; } void bch_btree_cache_free(struct cache_set *c) @@ -737,7 +745,8 @@ int bch_btree_cache_alloc(struct cache_set *c) c->verify_data = NULL; #endif - c->shrink.shrink = bch_mca_shrink; + c->shrink.count_objects = bch_mca_count; + c->shrink.scan_objects = bch_mca_scan; c->shrink.seeks = 4; c->shrink.batch = c->btree_pages * 2; register_shrinker(&c->shrink); diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index ba95ab84b2be..8435f81e5d85 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -153,7 +153,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list, bitmap_zero(bitmap, SB_JOURNAL_BUCKETS); pr_debug("%u journal buckets", ca->sb.njournal_buckets); - /* Read journal buckets ordered by golden ratio hash to quickly + /* + * Read journal buckets ordered by golden ratio hash to quickly * find a sequence of buckets with valid journal entries */ for (i = 0; i < ca->sb.njournal_buckets; i++) { @@ -166,18 +167,20 @@ int bch_journal_read(struct cache_set *c, struct list_head *list, goto bsearch; } - /* If that fails, check all the buckets we haven't checked + /* + * If that fails, check all the buckets we haven't checked * already */ pr_debug("falling back to linear search"); - for (l = 0; l < ca->sb.njournal_buckets; l++) { - if (test_bit(l, bitmap)) - continue; - + for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets); + l < ca->sb.njournal_buckets; + l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1)) if (read_bucket(l)) goto bsearch; - } + + if (list_empty(list)) + continue; bsearch: /* Binary search */ m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); @@ -197,10 +200,12 @@ bsearch: r = m; } - /* Read buckets in reverse order until we stop finding more + /* + * Read buckets in reverse order until we stop finding more * journal entries */ - pr_debug("finishing up"); + pr_debug("finishing up: m %u njournal_buckets %u", + m, 
ca->sb.njournal_buckets); l = m; while (1) { @@ -228,9 +233,10 @@ bsearch: } } - c->journal.seq = list_entry(list->prev, - struct journal_replay, - list)->j.seq; + if (!list_empty(list)) + c->journal.seq = list_entry(list->prev, + struct journal_replay, + list)->j.seq; return 0; #undef read_bucket @@ -428,7 +434,7 @@ static void do_journal_discard(struct cache *ca) return; } - switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) { + switch (atomic_read(&ja->discard_in_flight)) { case DISCARD_IN_FLIGHT: return; @@ -689,6 +695,7 @@ void bch_journal_meta(struct cache_set *c, struct closure *cl) if (cl) BUG_ON(!closure_wait(&w->wait, cl)); + closure_flush(&c->journal.io); __journal_try_write(c, true); } } diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 786a1a4f74d8..b6a74bcbb08f 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -996,17 +996,19 @@ static void request_write(struct cached_dev *dc, struct search *s) closure_bio_submit(bio, cl, s->d); } else { bch_writeback_add(dc); + s->op.cache_bio = bio; - if (s->op.flush_journal) { + if (bio->bi_rw & REQ_FLUSH) { /* Also need to send a flush to the backing device */ - s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, - dc->disk.bio_split); - - bio->bi_size = 0; - bio->bi_vcnt = 0; - closure_bio_submit(bio, cl, s->d); - } else { - s->op.cache_bio = bio; + struct bio *flush = bio_alloc_bioset(0, GFP_NOIO, + dc->disk.bio_split); + + flush->bi_rw = WRITE_FLUSH; + flush->bi_bdev = bio->bi_bdev; + flush->bi_end_io = request_endio; + flush->bi_private = cl; + + closure_bio_submit(flush, cl, s->d); } } out: diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 12a2c2846f99..924dcfdae111 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -223,8 +223,13 @@ STORE(__cached_dev) } if (attr == &sysfs_label) { - /* note: endlines are preserved */ - memcpy(dc->sb.label, buf, SB_LABEL_SIZE); + if (size > SB_LABEL_SIZE) + return -EINVAL; + memcpy(dc->sb.label, buf, size); + if (size < SB_LABEL_SIZE) + dc->sb.label[size] = '\0'; + if (size && dc->sb.label[size - 1] == '\n') + dc->sb.label[size - 1] = '\0'; bch_write_bdev_super(dc, NULL); if (dc->disk.c) { memcpy(dc->disk.c->uuids[dc->disk.id].label, @@ -556,7 +561,7 @@ STORE(__bch_cache_set) struct shrink_control sc; sc.gfp_mask = GFP_KERNEL; sc.nr_to_scan = strtoul_or_return(buf); - c->shrink.shrink(&c->shrink, &sc); + c->shrink.scan_objects(&c->shrink, &sc); } sysfs_strtoul(congested_read_threshold_us, diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 98eb81159a22..420dad545c7d 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -190,7 +190,16 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time) stats->last = now ?: 1; } -unsigned bch_next_delay(struct ratelimit *d, uint64_t done) +/** + * bch_next_delay() - increment @d by the amount of work done, and return how + * long to delay until the next time to do some work. 
+ * + * @d - the struct bch_ratelimit to update + * @done - the amount of work done, in arbitrary units + * + * Returns the amount of time to delay by, in jiffies + */ +uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done) { uint64_t now = local_clock(); diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 1ae2a73ad85f..ea345c6896f4 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -450,17 +450,23 @@ read_attribute(name ## _last_ ## frequency_units) (ewma) >> factor; \ }) -struct ratelimit { +struct bch_ratelimit { + /* Next time we want to do some work, in nanoseconds */ uint64_t next; + + /* + * Rate at which we want to do work, in units per nanosecond + * The units here correspond to the units passed to bch_next_delay() + */ unsigned rate; }; -static inline void ratelimit_reset(struct ratelimit *d) +static inline void bch_ratelimit_reset(struct bch_ratelimit *d) { d->next = local_clock(); } -unsigned bch_next_delay(struct ratelimit *d, uint64_t done); +uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done); #define __DIV_SAFE(n, d, zero) \ ({ \ diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 22cbff551628..ba3ee48320f2 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -94,11 +94,15 @@ static void update_writeback_rate(struct work_struct *work) static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) { + uint64_t ret; + if (atomic_read(&dc->disk.detaching) || !dc->writeback_percent) return 0; - return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); + ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); + + return min_t(uint64_t, ret, HZ); } /* Background writeback */ @@ -208,7 +212,7 @@ normal_refill: up_write(&dc->writeback_lock); - ratelimit_reset(&dc->writeback_rate); + bch_ratelimit_reset(&dc->writeback_rate); /* Punt to workqueue only so we don't recurse and blow the stack */ continue_at(cl, read_dirty, dirty_wq); @@ -318,9 +322,7 @@ static void write_dirty_finish(struct closure *cl) } bch_keybuf_del(&dc->writeback_keys, w); - atomic_dec_bug(&dc->in_flight); - - closure_wake_up(&dc->writeback_wait); + up(&dc->in_flight); closure_return_with_destructor(cl, dirty_io_destructor); } @@ -349,7 +351,7 @@ static void write_dirty(struct closure *cl) closure_bio_submit(&io->bio, cl, &io->dc->disk); - continue_at(cl, write_dirty_finish, dirty_wq); + continue_at(cl, write_dirty_finish, system_wq); } static void read_dirty_endio(struct bio *bio, int error) @@ -369,7 +371,7 @@ static void read_dirty_submit(struct closure *cl) closure_bio_submit(&io->bio, cl, &io->dc->disk); - continue_at(cl, write_dirty, dirty_wq); + continue_at(cl, write_dirty, system_wq); } static void read_dirty(struct closure *cl) @@ -394,12 +396,8 @@ static void read_dirty(struct closure *cl) if (delay > 0 && (KEY_START(&w->key) != dc->last_read || - jiffies_to_msecs(delay) > 50)) { - w->private = NULL; - - closure_delay(&dc->writeback, delay); - continue_at(cl, read_dirty, dirty_wq); - } + jiffies_to_msecs(delay) > 50)) + delay = schedule_timeout_uninterruptible(delay); dc->last_read = KEY_OFFSET(&w->key); @@ -424,15 +422,10 @@ static void read_dirty(struct closure *cl) trace_bcache_writeback(&w->key); - closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl); + down(&dc->in_flight); + closure_call(&io->cl, read_dirty_submit, NULL, cl); delay = writeback_delay(dc, KEY_SIZE(&w->key)); - - atomic_inc(&dc->in_flight); - - if 
(!closure_wait_event(&dc->writeback_wait, cl, - atomic_read(&dc->in_flight) < 64)) - continue_at(cl, read_dirty, dirty_wq); } if (0) { @@ -442,7 +435,11 @@ err: bch_keybuf_del(&dc->writeback_keys, w); } - refill_dirty(cl); + /* + * Wait for outstanding writeback IOs to finish (and keybuf slots to be + * freed) before refilling again + */ + continue_at(cl, refill_dirty, dirty_wq); } /* Init */ @@ -484,6 +481,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc) void bch_cached_dev_writeback_init(struct cached_dev *dc) { + sema_init(&dc->in_flight, 64); closure_init_unlocked(&dc->writeback); init_rwsem(&dc->writeback_lock); @@ -513,7 +511,7 @@ void bch_writeback_exit(void) int __init bch_writeback_init(void) { - dirty_wq = create_singlethread_workqueue("bcache_writeback"); + dirty_wq = create_workqueue("bcache_writeback"); if (!dirty_wq) return -ENOMEM; diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 5227e079a6e3..173cbb20d104 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1425,62 +1425,75 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp, unsigned long max_jiffies) { if (jiffies - b->last_accessed < max_jiffies) - return 1; + return 0; if (!(gfp & __GFP_IO)) { if (test_bit(B_READING, &b->state) || test_bit(B_WRITING, &b->state) || test_bit(B_DIRTY, &b->state)) - return 1; + return 0; } if (b->hold_count) - return 1; + return 0; __make_buffer_clean(b); __unlink_buffer(b); __free_buffer_wake(b); - return 0; + return 1; } -static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, - struct shrink_control *sc) +static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, + gfp_t gfp_mask) { int l; struct dm_buffer *b, *tmp; + long freed = 0; for (l = 0; l < LIST_SIZE; l++) { - list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) - if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) && - !--nr_to_scan) - return; + list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { + freed += __cleanup_old_buffer(b, gfp_mask, 0); + if (!--nr_to_scan) + break; + } dm_bufio_cond_resched(); } + return freed; } -static int shrink(struct shrinker *shrinker, struct shrink_control *sc) +static unsigned long +dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { - struct dm_bufio_client *c = - container_of(shrinker, struct dm_bufio_client, shrinker); - unsigned long r; - unsigned long nr_to_scan = sc->nr_to_scan; + struct dm_bufio_client *c; + unsigned long freed; + c = container_of(shrink, struct dm_bufio_client, shrinker); if (sc->gfp_mask & __GFP_IO) dm_bufio_lock(c); else if (!dm_bufio_trylock(c)) - return !nr_to_scan ? 
0 : -1; + return SHRINK_STOP; - if (nr_to_scan) - __scan(c, nr_to_scan, sc); + freed = __scan(c, sc->nr_to_scan, sc->gfp_mask); + dm_bufio_unlock(c); + return freed; +} - r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY]; - if (r > INT_MAX) - r = INT_MAX; +static unsigned long +dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) +{ + struct dm_bufio_client *c; + unsigned long count; - dm_bufio_unlock(c); + c = container_of(shrink, struct dm_bufio_client, shrinker); + if (sc->gfp_mask & __GFP_IO) + dm_bufio_lock(c); + else if (!dm_bufio_trylock(c)) + return 0; - return r; + count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY]; + dm_bufio_unlock(c); + return count; } /* @@ -1582,7 +1595,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign __cache_size_refresh(); mutex_unlock(&dm_bufio_clients_lock); - c->shrinker.shrink = shrink; + c->shrinker.count_objects = dm_bufio_shrink_count; + c->shrinker.scan_objects = dm_bufio_shrink_scan; c->shrinker.seeks = 1; c->shrinker.batch = 0; register_shrinker(&c->shrinker); @@ -1669,7 +1683,7 @@ static void cleanup_old_buffers(void) struct dm_buffer *b; b = list_entry(c->lru[LIST_CLEAN].prev, struct dm_buffer, lru_list); - if (__cleanup_old_buffer(b, 0, max_age * HZ)) + if (!__cleanup_old_buffer(b, 0, max_age * HZ)) break; dm_bufio_cond_resched(); } diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 0df3ec085ebb..29569768ffbf 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -67,9 +67,11 @@ static void free_bitset(unsigned long *bits) #define MIGRATION_COUNT_WINDOW 10 /* - * The block size of the device holding cache data must be >= 32KB + * The block size of the device holding cache data must be + * between 32KB and 1GB. */ #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT) +#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT) /* * FIXME: the cache is read/write for the time being. @@ -101,6 +103,8 @@ struct cache { struct dm_target *ti; struct dm_target_callbacks callbacks; + struct dm_cache_metadata *cmd; + /* * Metadata is written to this device. */ @@ -117,11 +121,6 @@ struct cache { struct dm_dev *cache_dev; /* - * Cache features such as write-through. - */ - struct cache_features features; - - /* * Size of the origin device in _complete_ blocks and native sectors. */ dm_oblock_t origin_blocks; @@ -138,8 +137,6 @@ struct cache { uint32_t sectors_per_block; int sectors_per_block_shift; - struct dm_cache_metadata *cmd; - spinlock_t lock; struct bio_list deferred_bios; struct bio_list deferred_flush_bios; @@ -148,8 +145,8 @@ struct cache { struct list_head completed_migrations; struct list_head need_commit_migrations; sector_t migration_threshold; - atomic_t nr_migrations; wait_queue_head_t migration_wait; + atomic_t nr_migrations; /* * cache_size entries, dirty if set @@ -160,9 +157,16 @@ struct cache { /* * origin_blocks entries, discarded if set. */ - uint32_t discard_block_size; /* a power of 2 times sectors per block */ dm_dblock_t discard_nr_blocks; unsigned long *discard_bitset; + uint32_t discard_block_size; /* a power of 2 times sectors per block */ + + /* + * Rather than reconstructing the table line for the status we just + * save it and regurgitate. 
+ */ + unsigned nr_ctr_args; + const char **ctr_args; struct dm_kcopyd_client *copier; struct workqueue_struct *wq; @@ -187,14 +191,12 @@ struct cache { bool loaded_mappings:1; bool loaded_discards:1; - struct cache_stats stats; - /* - * Rather than reconstructing the table line for the status we just - * save it and regurgitate. + * Cache features such as write-through. */ - unsigned nr_ctr_args; - const char **ctr_args; + struct cache_features features; + + struct cache_stats stats; }; struct per_bio_data { @@ -1687,24 +1689,25 @@ static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as, static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as, char **error) { - unsigned long tmp; + unsigned long block_size; if (!at_least_one_arg(as, error)) return -EINVAL; - if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp || - tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS || - tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) { + if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size || + block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS || + block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS || + block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) { *error = "Invalid data block size"; return -EINVAL; } - if (tmp > ca->cache_sectors) { + if (block_size > ca->cache_sectors) { *error = "Data block size is larger than the cache device"; return -EINVAL; } - ca->block_size = tmp; + ca->block_size = block_size; return 0; } @@ -2609,9 +2612,17 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits) static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct cache *cache = ti->private; + uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT; - blk_limits_io_min(limits, 0); - blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT); + /* + * If the system-determined stacked limits are compatible with the + * cache's blocksize (io_opt is a factor) do not override them. 
+ */ + if (io_opt_sectors < cache->sectors_per_block || + do_div(io_opt_sectors, cache->sectors_per_block)) { + blk_limits_io_min(limits, 0); + blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT); + } set_discard_limits(cache, limits); } diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 6d2d41ae9e32..0fce0bc1a957 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1645,20 +1645,14 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ret = -ENOMEM; - cc->io_queue = alloc_workqueue("kcryptd_io", - WQ_NON_REENTRANT| - WQ_MEM_RECLAIM, - 1); + cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1); if (!cc->io_queue) { ti->error = "Couldn't create kcryptd io queue"; goto bad; } cc->crypt_queue = alloc_workqueue("kcryptd", - WQ_NON_REENTRANT| - WQ_CPU_INTENSIVE| - WQ_MEM_RECLAIM, - 1); + WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); if (!cc->crypt_queue) { ti->error = "Couldn't create kcryptd queue"; goto bad; diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index ea49834377c8..2a20986a2fec 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -19,8 +19,6 @@ #define DM_MSG_PREFIX "io" #define DM_IO_MAX_REGIONS BITS_PER_LONG -#define MIN_IOS 16 -#define MIN_BIOS 16 struct dm_io_client { mempool_t *pool; @@ -50,16 +48,17 @@ static struct kmem_cache *_dm_io_cache; struct dm_io_client *dm_io_client_create(void) { struct dm_io_client *client; + unsigned min_ios = dm_get_reserved_bio_based_ios(); client = kmalloc(sizeof(*client), GFP_KERNEL); if (!client) return ERR_PTR(-ENOMEM); - client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache); + client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache); if (!client->pool) goto bad; - client->bios = bioset_create(MIN_BIOS, 0); + client->bios = bioset_create(min_ios, 0); if (!client->bios) goto bad; diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index f1b758675ec7..afe08146f73e 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -877,7 +877,7 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size) unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0; if (new_data < param->data || - invalid_str(new_data, (void *) param + param_size) || + invalid_str(new_data, (void *) param + param_size) || !*new_data || strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) { DMWARN("Invalid new mapped device name or uuid string supplied."); return -EINVAL; @@ -1262,44 +1262,37 @@ static int table_load(struct dm_ioctl *param, size_t param_size) r = dm_table_create(&t, get_mode(param), param->target_count, md); if (r) - goto out; + goto err; + /* Protect md->type and md->queue against concurrent table loads. */ + dm_lock_md_type(md); r = populate_table(t, param, param_size); - if (r) { - dm_table_destroy(t); - goto out; - } + if (r) + goto err_unlock_md_type; immutable_target_type = dm_get_immutable_target_type(md); if (immutable_target_type && (immutable_target_type != dm_table_get_immutable_target_type(t))) { DMWARN("can't replace immutable target type %s", immutable_target_type->name); - dm_table_destroy(t); r = -EINVAL; - goto out; + goto err_unlock_md_type; } - /* Protect md->type and md->queue against concurrent table loads. */ - dm_lock_md_type(md); if (dm_get_md_type(md) == DM_TYPE_NONE) /* Initial table load: acquire type of table. 
*/ dm_set_md_type(md, dm_table_get_type(t)); else if (dm_get_md_type(md) != dm_table_get_type(t)) { DMWARN("can't change device type after initial table load."); - dm_table_destroy(t); - dm_unlock_md_type(md); r = -EINVAL; - goto out; + goto err_unlock_md_type; } /* setup md->queue to reflect md's type (may block) */ r = dm_setup_md_queue(md); if (r) { DMWARN("unable to set up device queue for new table."); - dm_table_destroy(t); - dm_unlock_md_type(md); - goto out; + goto err_unlock_md_type; } dm_unlock_md_type(md); @@ -1309,9 +1302,8 @@ static int table_load(struct dm_ioctl *param, size_t param_size) if (!hc || hc->md != md) { DMWARN("device has been removed from the dev hash table."); up_write(&_hash_lock); - dm_table_destroy(t); r = -ENXIO; - goto out; + goto err_destroy_table; } if (hc->new_map) @@ -1322,7 +1314,6 @@ static int table_load(struct dm_ioctl *param, size_t param_size) param->flags |= DM_INACTIVE_PRESENT_FLAG; __dev_status(md, param); -out: if (old_map) { dm_sync_table(md); dm_table_destroy(old_map); @@ -1330,6 +1321,15 @@ out: dm_put(md); + return 0; + +err_unlock_md_type: + dm_unlock_md_type(md); +err_destroy_table: + dm_table_destroy(t); +err: + dm_put(md); + return r; } @@ -1455,20 +1455,26 @@ static int table_status(struct dm_ioctl *param, size_t param_size) return 0; } -static bool buffer_test_overflow(char *result, unsigned maxlen) -{ - return !maxlen || strlen(result) + 1 >= maxlen; -} - /* - * Process device-mapper dependent messages. + * Process device-mapper dependent messages. Messages prefixed with '@' + * are processed by the DM core. All others are delivered to the target. * Returns a number <= 1 if message was processed by device mapper. * Returns 2 if message should be delivered to the target. */ static int message_for_md(struct mapped_device *md, unsigned argc, char **argv, char *result, unsigned maxlen) { - return 2; + int r; + + if (**argv != '@') + return 2; /* no '@' prefix, deliver to target */ + + r = dm_stats_message(md, argc, argv, result, maxlen); + if (r < 2) + return r; + + DMERR("Unsupported message sent to DM core: %s", argv[0]); + return -EINVAL; } /* @@ -1542,7 +1548,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size) if (r == 1) { param->flags |= DM_DATA_OUT_FLAG; - if (buffer_test_overflow(result, maxlen)) + if (dm_message_test_buffer_overflow(result, maxlen)) param->flags |= DM_BUFFER_FULL_FLAG; else param->data_size = param->data_start + strlen(result) + 1; diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index d581fe5d2faf..3a7cade5e27d 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -833,8 +833,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro goto bad_slab; INIT_WORK(&kc->kcopyd_work, do_work); - kc->kcopyd_wq = alloc_workqueue("kcopyd", - WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0); + kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0); if (!kc->kcopyd_wq) goto bad_workqueue; diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index b759a127f9c3..de570a558764 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -7,6 +7,7 @@ #include <linux/device-mapper.h> +#include "dm.h" #include "dm-path-selector.h" #include "dm-uevent.h" @@ -116,8 +117,6 @@ struct dm_mpath_io { typedef int (*action_fn) (struct pgpath *pgpath); -#define MIN_IOS 256 /* Mempool size */ - static struct kmem_cache *_mpio_cache; static struct workqueue_struct *kmultipathd, *kmpath_handlerd; @@ -190,6 +189,7 @@ static void free_priority_group(struct 
priority_group *pg, static struct multipath *alloc_multipath(struct dm_target *ti) { struct multipath *m; + unsigned min_ios = dm_get_reserved_rq_based_ios(); m = kzalloc(sizeof(*m), GFP_KERNEL); if (m) { @@ -202,7 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti) INIT_WORK(&m->trigger_event, trigger_event); init_waitqueue_head(&m->pg_init_wait); mutex_init(&m->work_mutex); - m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); + m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache); if (!m->mpio_pool) { kfree(m); return NULL; @@ -1268,6 +1268,7 @@ static int noretry_error(int error) case -EREMOTEIO: case -EILSEQ: case -ENODATA: + case -ENOSPC: return 1; } @@ -1298,8 +1299,17 @@ static int do_end_io(struct multipath *m, struct request *clone, if (!error && !clone->errors) return 0; /* I/O complete */ - if (noretry_error(error)) + if (noretry_error(error)) { + if ((clone->cmd_flags & REQ_WRITE_SAME) && + !clone->q->limits.max_write_same_sectors) { + struct queue_limits *limits; + + /* device doesn't really support WRITE SAME, disable it */ + limits = dm_get_queue_limits(dm_table_get_md(m->ti->table)); + limits->max_write_same_sectors = 0; + } return error; + } if (mpio->pgpath) fail_path(mpio->pgpath); diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 699b5be68d31..9584443c5614 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -1080,8 +1080,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record); ti->discard_zeroes_data_unsupported = true; - ms->kmirrord_wq = alloc_workqueue("kmirrord", - WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0); + ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0); if (!ms->kmirrord_wq) { DMERR("couldn't start kmirrord"); r = -ENOMEM; diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 3ac415675b6c..2d2b1b7588d7 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, */ INIT_WORK_ONSTACK(&req.work, do_metadata); queue_work(ps->metadata_wq, &req.work); - flush_work(&req.work); + flush_workqueue(ps->metadata_wq); return req.result; } @@ -269,6 +269,14 @@ static chunk_t area_location(struct pstore *ps, chunk_t area) return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area); } +static void skip_metadata(struct pstore *ps) +{ + uint32_t stride = ps->exceptions_per_area + 1; + chunk_t next_free = ps->next_free; + if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS) + ps->next_free++; +} + /* * Read or write a metadata area. Remembering to skip the first * chunk which holds the header. @@ -502,6 +510,8 @@ static int read_exceptions(struct pstore *ps, ps->current_area--; + skip_metadata(ps); + return 0; } @@ -616,8 +626,6 @@ static int persistent_prepare_exception(struct dm_exception_store *store, struct dm_exception *e) { struct pstore *ps = get_info(store); - uint32_t stride; - chunk_t next_free; sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); /* Is there enough room ? */ @@ -630,10 +638,8 @@ static int persistent_prepare_exception(struct dm_exception_store *store, * Move onto the next free pending, making sure to take * into account the location of the metadata chunks. 
*/ - stride = (ps->exceptions_per_area + 1); - next_free = ++ps->next_free; - if (sector_div(next_free, stride) == 1) - ps->next_free++; + ps->next_free++; + skip_metadata(ps); atomic_inc(&ps->pending_count); return 0; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index c434e5aab2df..aec57d76db5d 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -725,17 +725,16 @@ static int calc_max_buckets(void) */ static int init_hash_tables(struct dm_snapshot *s) { - sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets; + sector_t hash_size, cow_dev_size, max_buckets; /* * Calculate based on the size of the original volume or * the COW volume... */ cow_dev_size = get_dev_size(s->cow->bdev); - origin_dev_size = get_dev_size(s->origin->bdev); max_buckets = calc_max_buckets(); - hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; + hash_size = cow_dev_size >> s->store->chunk_shift; hash_size = min(hash_size, max_buckets); if (hash_size < 64) diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c new file mode 100644 index 000000000000..3d404c1371ed --- /dev/null +++ b/drivers/md/dm-stats.c @@ -0,0 +1,980 @@ +#include <linux/errno.h> +#include <linux/numa.h> +#include <linux/slab.h> +#include <linux/rculist.h> +#include <linux/threads.h> +#include <linux/preempt.h> +#include <linux/irqflags.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/device-mapper.h> + +#include "dm.h" +#include "dm-stats.h" + +#define DM_MSG_PREFIX "stats" + +static int dm_stat_need_rcu_barrier; + +/* + * Using 64-bit values to avoid overflow (which is a + * problem that block/genhd.c's IO accounting has). + */ +struct dm_stat_percpu { + unsigned long long sectors[2]; + unsigned long long ios[2]; + unsigned long long merges[2]; + unsigned long long ticks[2]; + unsigned long long io_ticks[2]; + unsigned long long io_ticks_total; + unsigned long long time_in_queue; +}; + +struct dm_stat_shared { + atomic_t in_flight[2]; + unsigned long stamp; + struct dm_stat_percpu tmp; +}; + +struct dm_stat { + struct list_head list_entry; + int id; + size_t n_entries; + sector_t start; + sector_t end; + sector_t step; + const char *program_id; + const char *aux_data; + struct rcu_head rcu_head; + size_t shared_alloc_size; + size_t percpu_alloc_size; + struct dm_stat_percpu *stat_percpu[NR_CPUS]; + struct dm_stat_shared stat_shared[0]; +}; + +struct dm_stats_last_position { + sector_t last_sector; + unsigned last_rw; +}; + +/* + * A typo on the command line could possibly make the kernel run out of memory + * and crash. To prevent the crash we account all used memory. We fail if we + * exhaust 1/4 of all memory or 1/2 of vmalloc space. 
+ */ +#define DM_STATS_MEMORY_FACTOR 4 +#define DM_STATS_VMALLOC_FACTOR 2 + +static DEFINE_SPINLOCK(shared_memory_lock); + +static unsigned long shared_memory_amount; + +static bool __check_shared_memory(size_t alloc_size) +{ + size_t a; + + a = shared_memory_amount + alloc_size; + if (a < shared_memory_amount) + return false; + if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR) + return false; +#ifdef CONFIG_MMU + if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR) + return false; +#endif + return true; +} + +static bool check_shared_memory(size_t alloc_size) +{ + bool ret; + + spin_lock_irq(&shared_memory_lock); + + ret = __check_shared_memory(alloc_size); + + spin_unlock_irq(&shared_memory_lock); + + return ret; +} + +static bool claim_shared_memory(size_t alloc_size) +{ + spin_lock_irq(&shared_memory_lock); + + if (!__check_shared_memory(alloc_size)) { + spin_unlock_irq(&shared_memory_lock); + return false; + } + + shared_memory_amount += alloc_size; + + spin_unlock_irq(&shared_memory_lock); + + return true; +} + +static void free_shared_memory(size_t alloc_size) +{ + unsigned long flags; + + spin_lock_irqsave(&shared_memory_lock, flags); + + if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) { + spin_unlock_irqrestore(&shared_memory_lock, flags); + DMCRIT("Memory usage accounting bug."); + return; + } + + shared_memory_amount -= alloc_size; + + spin_unlock_irqrestore(&shared_memory_lock, flags); +} + +static void *dm_kvzalloc(size_t alloc_size, int node) +{ + void *p; + + if (!claim_shared_memory(alloc_size)) + return NULL; + + if (alloc_size <= KMALLOC_MAX_SIZE) { + p = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node); + if (p) + return p; + } + p = vzalloc_node(alloc_size, node); + if (p) + return p; + + free_shared_memory(alloc_size); + + return NULL; +} + +static void dm_kvfree(void *ptr, size_t alloc_size) +{ + if (!ptr) + return; + + free_shared_memory(alloc_size); + + if (is_vmalloc_addr(ptr)) + vfree(ptr); + else + kfree(ptr); +} + +static void dm_stat_free(struct rcu_head *head) +{ + int cpu; + struct dm_stat *s = container_of(head, struct dm_stat, rcu_head); + + kfree(s->program_id); + kfree(s->aux_data); + for_each_possible_cpu(cpu) + dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size); + dm_kvfree(s, s->shared_alloc_size); +} + +static int dm_stat_in_flight(struct dm_stat_shared *shared) +{ + return atomic_read(&shared->in_flight[READ]) + + atomic_read(&shared->in_flight[WRITE]); +} + +void dm_stats_init(struct dm_stats *stats) +{ + int cpu; + struct dm_stats_last_position *last; + + mutex_init(&stats->mutex); + INIT_LIST_HEAD(&stats->list); + stats->last = alloc_percpu(struct dm_stats_last_position); + for_each_possible_cpu(cpu) { + last = per_cpu_ptr(stats->last, cpu); + last->last_sector = (sector_t)ULLONG_MAX; + last->last_rw = UINT_MAX; + } +} + +void dm_stats_cleanup(struct dm_stats *stats) +{ + size_t ni; + struct dm_stat *s; + struct dm_stat_shared *shared; + + while (!list_empty(&stats->list)) { + s = container_of(stats->list.next, struct dm_stat, list_entry); + list_del(&s->list_entry); + for (ni = 0; ni < s->n_entries; ni++) { + shared = &s->stat_shared[ni]; + if (WARN_ON(dm_stat_in_flight(shared))) { + DMCRIT("leaked in-flight counter at index %lu " + "(start %llu, end %llu, step %llu): reads %d, writes %d", + (unsigned long)ni, + (unsigned long long)s->start, + (unsigned long long)s->end, + (unsigned long long)s->step, + atomic_read(&shared->in_flight[READ]), + 
atomic_read(&shared->in_flight[WRITE])); + } + } + dm_stat_free(&s->rcu_head); + } + free_percpu(stats->last); +} + +static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, + sector_t step, const char *program_id, const char *aux_data, + void (*suspend_callback)(struct mapped_device *), + void (*resume_callback)(struct mapped_device *), + struct mapped_device *md) +{ + struct list_head *l; + struct dm_stat *s, *tmp_s; + sector_t n_entries; + size_t ni; + size_t shared_alloc_size; + size_t percpu_alloc_size; + struct dm_stat_percpu *p; + int cpu; + int ret_id; + int r; + + if (end < start || !step) + return -EINVAL; + + n_entries = end - start; + if (dm_sector_div64(n_entries, step)) + n_entries++; + + if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1)) + return -EOVERFLOW; + + shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared); + if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries) + return -EOVERFLOW; + + percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu); + if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries) + return -EOVERFLOW; + + if (!check_shared_memory(shared_alloc_size + num_possible_cpus() * percpu_alloc_size)) + return -ENOMEM; + + s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE); + if (!s) + return -ENOMEM; + + s->n_entries = n_entries; + s->start = start; + s->end = end; + s->step = step; + s->shared_alloc_size = shared_alloc_size; + s->percpu_alloc_size = percpu_alloc_size; + + s->program_id = kstrdup(program_id, GFP_KERNEL); + if (!s->program_id) { + r = -ENOMEM; + goto out; + } + s->aux_data = kstrdup(aux_data, GFP_KERNEL); + if (!s->aux_data) { + r = -ENOMEM; + goto out; + } + + for (ni = 0; ni < n_entries; ni++) { + atomic_set(&s->stat_shared[ni].in_flight[READ], 0); + atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0); + } + + for_each_possible_cpu(cpu) { + p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu)); + if (!p) { + r = -ENOMEM; + goto out; + } + s->stat_percpu[cpu] = p; + } + + /* + * Suspend/resume to make sure there is no i/o in flight, + * so that newly created statistics will be exact. 
+ * + * (note: we couldn't suspend earlier because we must not + * allocate memory while suspended) + */ + suspend_callback(md); + + mutex_lock(&stats->mutex); + s->id = 0; + list_for_each(l, &stats->list) { + tmp_s = container_of(l, struct dm_stat, list_entry); + if (WARN_ON(tmp_s->id < s->id)) { + r = -EINVAL; + goto out_unlock_resume; + } + if (tmp_s->id > s->id) + break; + if (unlikely(s->id == INT_MAX)) { + r = -ENFILE; + goto out_unlock_resume; + } + s->id++; + } + ret_id = s->id; + list_add_tail_rcu(&s->list_entry, l); + mutex_unlock(&stats->mutex); + + resume_callback(md); + + return ret_id; + +out_unlock_resume: + mutex_unlock(&stats->mutex); + resume_callback(md); +out: + dm_stat_free(&s->rcu_head); + return r; +} + +static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id) +{ + struct dm_stat *s; + + list_for_each_entry(s, &stats->list, list_entry) { + if (s->id > id) + break; + if (s->id == id) + return s; + } + + return NULL; +} + +static int dm_stats_delete(struct dm_stats *stats, int id) +{ + struct dm_stat *s; + int cpu; + + mutex_lock(&stats->mutex); + + s = __dm_stats_find(stats, id); + if (!s) { + mutex_unlock(&stats->mutex); + return -ENOENT; + } + + list_del_rcu(&s->list_entry); + mutex_unlock(&stats->mutex); + + /* + * vfree can't be called from RCU callback + */ + for_each_possible_cpu(cpu) + if (is_vmalloc_addr(s->stat_percpu)) + goto do_sync_free; + if (is_vmalloc_addr(s)) { +do_sync_free: + synchronize_rcu_expedited(); + dm_stat_free(&s->rcu_head); + } else { + ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1; + call_rcu(&s->rcu_head, dm_stat_free); + } + return 0; +} + +static int dm_stats_list(struct dm_stats *stats, const char *program, + char *result, unsigned maxlen) +{ + struct dm_stat *s; + sector_t len; + unsigned sz = 0; + + /* + * Output format: + * <region_id>: <start_sector>+<length> <step> <program_id> <aux_data> + */ + + mutex_lock(&stats->mutex); + list_for_each_entry(s, &stats->list, list_entry) { + if (!program || !strcmp(program, s->program_id)) { + len = s->end - s->start; + DMEMIT("%d: %llu+%llu %llu %s %s\n", s->id, + (unsigned long long)s->start, + (unsigned long long)len, + (unsigned long long)s->step, + s->program_id, + s->aux_data); + } + } + mutex_unlock(&stats->mutex); + + return 1; +} + +static void dm_stat_round(struct dm_stat_shared *shared, struct dm_stat_percpu *p) +{ + /* + * This is racy, but so is part_round_stats_single. + */ + unsigned long now = jiffies; + unsigned in_flight_read; + unsigned in_flight_write; + unsigned long difference = now - shared->stamp; + + if (!difference) + return; + in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]); + in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]); + if (in_flight_read) + p->io_ticks[READ] += difference; + if (in_flight_write) + p->io_ticks[WRITE] += difference; + if (in_flight_read + in_flight_write) { + p->io_ticks_total += difference; + p->time_in_queue += (in_flight_read + in_flight_write) * difference; + } + shared->stamp = now; +} + +static void dm_stat_for_entry(struct dm_stat *s, size_t entry, + unsigned long bi_rw, sector_t len, bool merged, + bool end, unsigned long duration) +{ + unsigned long idx = bi_rw & REQ_WRITE; + struct dm_stat_shared *shared = &s->stat_shared[entry]; + struct dm_stat_percpu *p; + + /* + * For strict correctness we should use local_irq_save/restore + * instead of preempt_disable/enable. 
+ * + * preempt_disable/enable is racy if the driver finishes bios + * from non-interrupt context as well as from interrupt context + * or from more different interrupts. + * + * On 64-bit architectures the race only results in not counting some + * events, so it is acceptable. On 32-bit architectures the race could + * cause the counter going off by 2^32, so we need to do proper locking + * there. + * + * part_stat_lock()/part_stat_unlock() have this race too. + */ +#if BITS_PER_LONG == 32 + unsigned long flags; + local_irq_save(flags); +#else + preempt_disable(); +#endif + p = &s->stat_percpu[smp_processor_id()][entry]; + + if (!end) { + dm_stat_round(shared, p); + atomic_inc(&shared->in_flight[idx]); + } else { + dm_stat_round(shared, p); + atomic_dec(&shared->in_flight[idx]); + p->sectors[idx] += len; + p->ios[idx] += 1; + p->merges[idx] += merged; + p->ticks[idx] += duration; + } + +#if BITS_PER_LONG == 32 + local_irq_restore(flags); +#else + preempt_enable(); +#endif +} + +static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw, + sector_t bi_sector, sector_t end_sector, + bool end, unsigned long duration, + struct dm_stats_aux *stats_aux) +{ + sector_t rel_sector, offset, todo, fragment_len; + size_t entry; + + if (end_sector <= s->start || bi_sector >= s->end) + return; + if (unlikely(bi_sector < s->start)) { + rel_sector = 0; + todo = end_sector - s->start; + } else { + rel_sector = bi_sector - s->start; + todo = end_sector - bi_sector; + } + if (unlikely(end_sector > s->end)) + todo -= (end_sector - s->end); + + offset = dm_sector_div64(rel_sector, s->step); + entry = rel_sector; + do { + if (WARN_ON_ONCE(entry >= s->n_entries)) { + DMCRIT("Invalid area access in region id %d", s->id); + return; + } + fragment_len = todo; + if (fragment_len > s->step - offset) + fragment_len = s->step - offset; + dm_stat_for_entry(s, entry, bi_rw, fragment_len, + stats_aux->merged, end, duration); + todo -= fragment_len; + entry++; + offset = 0; + } while (unlikely(todo != 0)); +} + +void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, + sector_t bi_sector, unsigned bi_sectors, bool end, + unsigned long duration, struct dm_stats_aux *stats_aux) +{ + struct dm_stat *s; + sector_t end_sector; + struct dm_stats_last_position *last; + + if (unlikely(!bi_sectors)) + return; + + end_sector = bi_sector + bi_sectors; + + if (!end) { + /* + * A race condition can at worst result in the merged flag being + * misrepresented, so we don't have to disable preemption here. 
+ */ + last = __this_cpu_ptr(stats->last); + stats_aux->merged = + (bi_sector == (ACCESS_ONCE(last->last_sector) && + ((bi_rw & (REQ_WRITE | REQ_DISCARD)) == + (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD))) + )); + ACCESS_ONCE(last->last_sector) = end_sector; + ACCESS_ONCE(last->last_rw) = bi_rw; + } + + rcu_read_lock(); + + list_for_each_entry_rcu(s, &stats->list, list_entry) + __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration, stats_aux); + + rcu_read_unlock(); +} + +static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared, + struct dm_stat *s, size_t x) +{ + int cpu; + struct dm_stat_percpu *p; + + local_irq_disable(); + p = &s->stat_percpu[smp_processor_id()][x]; + dm_stat_round(shared, p); + local_irq_enable(); + + memset(&shared->tmp, 0, sizeof(shared->tmp)); + for_each_possible_cpu(cpu) { + p = &s->stat_percpu[cpu][x]; + shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]); + shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]); + shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]); + shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]); + shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]); + shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]); + shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]); + shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]); + shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]); + shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]); + shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total); + shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue); + } +} + +static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end, + bool init_tmp_percpu_totals) +{ + size_t x; + struct dm_stat_shared *shared; + struct dm_stat_percpu *p; + + for (x = idx_start; x < idx_end; x++) { + shared = &s->stat_shared[x]; + if (init_tmp_percpu_totals) + __dm_stat_init_temporary_percpu_totals(shared, s, x); + local_irq_disable(); + p = &s->stat_percpu[smp_processor_id()][x]; + p->sectors[READ] -= shared->tmp.sectors[READ]; + p->sectors[WRITE] -= shared->tmp.sectors[WRITE]; + p->ios[READ] -= shared->tmp.ios[READ]; + p->ios[WRITE] -= shared->tmp.ios[WRITE]; + p->merges[READ] -= shared->tmp.merges[READ]; + p->merges[WRITE] -= shared->tmp.merges[WRITE]; + p->ticks[READ] -= shared->tmp.ticks[READ]; + p->ticks[WRITE] -= shared->tmp.ticks[WRITE]; + p->io_ticks[READ] -= shared->tmp.io_ticks[READ]; + p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE]; + p->io_ticks_total -= shared->tmp.io_ticks_total; + p->time_in_queue -= shared->tmp.time_in_queue; + local_irq_enable(); + } +} + +static int dm_stats_clear(struct dm_stats *stats, int id) +{ + struct dm_stat *s; + + mutex_lock(&stats->mutex); + + s = __dm_stats_find(stats, id); + if (!s) { + mutex_unlock(&stats->mutex); + return -ENOENT; + } + + __dm_stat_clear(s, 0, s->n_entries, true); + + mutex_unlock(&stats->mutex); + + return 1; +} + +/* + * This is like jiffies_to_msec, but works for 64-bit values. 
+ */ +static unsigned long long dm_jiffies_to_msec64(unsigned long long j) +{ + unsigned long long result = 0; + unsigned mult; + + if (j) + result = jiffies_to_msecs(j & 0x3fffff); + if (j >= 1 << 22) { + mult = jiffies_to_msecs(1 << 22); + result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff); + } + if (j >= 1ULL << 44) + result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44); + + return result; +} + +static int dm_stats_print(struct dm_stats *stats, int id, + size_t idx_start, size_t idx_len, + bool clear, char *result, unsigned maxlen) +{ + unsigned sz = 0; + struct dm_stat *s; + size_t x; + sector_t start, end, step; + size_t idx_end; + struct dm_stat_shared *shared; + + /* + * Output format: + * <start_sector>+<length> counters + */ + + mutex_lock(&stats->mutex); + + s = __dm_stats_find(stats, id); + if (!s) { + mutex_unlock(&stats->mutex); + return -ENOENT; + } + + idx_end = idx_start + idx_len; + if (idx_end < idx_start || + idx_end > s->n_entries) + idx_end = s->n_entries; + + if (idx_start > idx_end) + idx_start = idx_end; + + step = s->step; + start = s->start + (step * idx_start); + + for (x = idx_start; x < idx_end; x++, start = end) { + shared = &s->stat_shared[x]; + end = start + step; + if (unlikely(end > s->end)) + end = s->end; + + __dm_stat_init_temporary_percpu_totals(shared, s, x); + + DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu\n", + (unsigned long long)start, + (unsigned long long)step, + shared->tmp.ios[READ], + shared->tmp.merges[READ], + shared->tmp.sectors[READ], + dm_jiffies_to_msec64(shared->tmp.ticks[READ]), + shared->tmp.ios[WRITE], + shared->tmp.merges[WRITE], + shared->tmp.sectors[WRITE], + dm_jiffies_to_msec64(shared->tmp.ticks[WRITE]), + dm_stat_in_flight(shared), + dm_jiffies_to_msec64(shared->tmp.io_ticks_total), + dm_jiffies_to_msec64(shared->tmp.time_in_queue), + dm_jiffies_to_msec64(shared->tmp.io_ticks[READ]), + dm_jiffies_to_msec64(shared->tmp.io_ticks[WRITE])); + + if (unlikely(sz + 1 >= maxlen)) + goto buffer_overflow; + } + + if (clear) + __dm_stat_clear(s, idx_start, idx_end, false); + +buffer_overflow: + mutex_unlock(&stats->mutex); + + return 1; +} + +static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data) +{ + struct dm_stat *s; + const char *new_aux_data; + + mutex_lock(&stats->mutex); + + s = __dm_stats_find(stats, id); + if (!s) { + mutex_unlock(&stats->mutex); + return -ENOENT; + } + + new_aux_data = kstrdup(aux_data, GFP_KERNEL); + if (!new_aux_data) { + mutex_unlock(&stats->mutex); + return -ENOMEM; + } + + kfree(s->aux_data); + s->aux_data = new_aux_data; + + mutex_unlock(&stats->mutex); + + return 0; +} + +static int message_stats_create(struct mapped_device *md, + unsigned argc, char **argv, + char *result, unsigned maxlen) +{ + int id; + char dummy; + unsigned long long start, end, len, step; + unsigned divisor; + const char *program_id, *aux_data; + + /* + * Input format: + * <range> <step> [<program_id> [<aux_data>]] + */ + + if (argc < 3 || argc > 5) + return -EINVAL; + + if (!strcmp(argv[1], "-")) { + start = 0; + len = dm_get_size(md); + if (!len) + len = 1; + } else if (sscanf(argv[1], "%llu+%llu%c", &start, &len, &dummy) != 2 || + start != (sector_t)start || len != (sector_t)len) + return -EINVAL; + + end = start + len; + if (start >= end) + return -EINVAL; + + if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) { + step = end - start; + if (do_div(step, divisor)) + 
step++; + if (!step) + step = 1; + } else if (sscanf(argv[2], "%llu%c", &step, &dummy) != 1 || + step != (sector_t)step || !step) + return -EINVAL; + + program_id = "-"; + aux_data = "-"; + + if (argc > 3) + program_id = argv[3]; + + if (argc > 4) + aux_data = argv[4]; + + /* + * If a buffer overflow happens after we created the region, + * it's too late (the userspace would retry with a larger + * buffer, but the region id that caused the overflow is already + * leaked). So we must detect buffer overflow in advance. + */ + snprintf(result, maxlen, "%d", INT_MAX); + if (dm_message_test_buffer_overflow(result, maxlen)) + return 1; + + id = dm_stats_create(dm_get_stats(md), start, end, step, program_id, aux_data, + dm_internal_suspend, dm_internal_resume, md); + if (id < 0) + return id; + + snprintf(result, maxlen, "%d", id); + + return 1; +} + +static int message_stats_delete(struct mapped_device *md, + unsigned argc, char **argv) +{ + int id; + char dummy; + + if (argc != 2) + return -EINVAL; + + if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) + return -EINVAL; + + return dm_stats_delete(dm_get_stats(md), id); +} + +static int message_stats_clear(struct mapped_device *md, + unsigned argc, char **argv) +{ + int id; + char dummy; + + if (argc != 2) + return -EINVAL; + + if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) + return -EINVAL; + + return dm_stats_clear(dm_get_stats(md), id); +} + +static int message_stats_list(struct mapped_device *md, + unsigned argc, char **argv, + char *result, unsigned maxlen) +{ + int r; + const char *program = NULL; + + if (argc < 1 || argc > 2) + return -EINVAL; + + if (argc > 1) { + program = kstrdup(argv[1], GFP_KERNEL); + if (!program) + return -ENOMEM; + } + + r = dm_stats_list(dm_get_stats(md), program, result, maxlen); + + kfree(program); + + return r; +} + +static int message_stats_print(struct mapped_device *md, + unsigned argc, char **argv, bool clear, + char *result, unsigned maxlen) +{ + int id; + char dummy; + unsigned long idx_start = 0, idx_len = ULONG_MAX; + + if (argc != 2 && argc != 4) + return -EINVAL; + + if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) + return -EINVAL; + + if (argc > 3) { + if (strcmp(argv[2], "-") && + sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1) + return -EINVAL; + if (strcmp(argv[3], "-") && + sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1) + return -EINVAL; + } + + return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear, + result, maxlen); +} + +static int message_stats_set_aux(struct mapped_device *md, + unsigned argc, char **argv) +{ + int id; + char dummy; + + if (argc != 3) + return -EINVAL; + + if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) + return -EINVAL; + + return dm_stats_set_aux(dm_get_stats(md), id, argv[2]); +} + +int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv, + char *result, unsigned maxlen) +{ + int r; + + if (dm_request_based(md)) { + DMWARN("Statistics are only supported for bio-based devices"); + return -EOPNOTSUPP; + } + + /* All messages here must start with '@' */ + if (!strcasecmp(argv[0], "@stats_create")) + r = message_stats_create(md, argc, argv, result, maxlen); + else if (!strcasecmp(argv[0], "@stats_delete")) + r = message_stats_delete(md, argc, argv); + else if (!strcasecmp(argv[0], "@stats_clear")) + r = message_stats_clear(md, argc, argv); + else if (!strcasecmp(argv[0], "@stats_list")) + r = message_stats_list(md, argc, argv, result, maxlen); + else if (!strcasecmp(argv[0], "@stats_print")) + r = 
message_stats_print(md, argc, argv, false, result, maxlen); + else if (!strcasecmp(argv[0], "@stats_print_clear")) + r = message_stats_print(md, argc, argv, true, result, maxlen); + else if (!strcasecmp(argv[0], "@stats_set_aux")) + r = message_stats_set_aux(md, argc, argv); + else + return 2; /* this wasn't a stats message */ + + if (r == -EINVAL) + DMWARN("Invalid parameters for message %s", argv[0]); + + return r; +} + +int __init dm_statistics_init(void) +{ + dm_stat_need_rcu_barrier = 0; + return 0; +} + +void dm_statistics_exit(void) +{ + if (dm_stat_need_rcu_barrier) + rcu_barrier(); + if (WARN_ON(shared_memory_amount)) + DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount); +} + +module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO); +MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics"); diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h new file mode 100644 index 000000000000..e7c4984bf235 --- /dev/null +++ b/drivers/md/dm-stats.h @@ -0,0 +1,40 @@ +#ifndef DM_STATS_H +#define DM_STATS_H + +#include <linux/types.h> +#include <linux/mutex.h> +#include <linux/list.h> + +int dm_statistics_init(void); +void dm_statistics_exit(void); + +struct dm_stats { + struct mutex mutex; + struct list_head list; /* list of struct dm_stat */ + struct dm_stats_last_position __percpu *last; + sector_t last_sector; + unsigned last_rw; +}; + +struct dm_stats_aux { + bool merged; +}; + +void dm_stats_init(struct dm_stats *st); +void dm_stats_cleanup(struct dm_stats *st); + +struct mapped_device; + +int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv, + char *result, unsigned maxlen); + +void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, + sector_t bi_sector, unsigned bi_sectors, bool end, + unsigned long duration, struct dm_stats_aux *aux); + +static inline bool dm_stats_used(struct dm_stats *st) +{ + return !list_empty(&st->list); +} + +#endif diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index d907ca6227ce..73c1712dad96 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -4,6 +4,7 @@ * This file is released under the GPL. */ +#include "dm.h" #include <linux/device-mapper.h> #include <linux/module.h> diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index f221812b7dbc..8f8783533ac7 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -860,14 +860,17 @@ EXPORT_SYMBOL(dm_consume_args); static int dm_table_set_type(struct dm_table *t) { unsigned i; - unsigned bio_based = 0, request_based = 0; + unsigned bio_based = 0, request_based = 0, hybrid = 0; struct dm_target *tgt; struct dm_dev_internal *dd; struct list_head *devices; + unsigned live_md_type; for (i = 0; i < t->num_targets; i++) { tgt = t->targets + i; - if (dm_target_request_based(tgt)) + if (dm_target_hybrid(tgt)) + hybrid = 1; + else if (dm_target_request_based(tgt)) request_based = 1; else bio_based = 1; @@ -879,6 +882,19 @@ static int dm_table_set_type(struct dm_table *t) } } + if (hybrid && !bio_based && !request_based) { + /* + * The targets can work either way. + * Determine the type from the live device. + * Default to bio-based if device is new. 
+ */ + live_md_type = dm_get_md_type(t->md); + if (live_md_type == DM_TYPE_REQUEST_BASED) + request_based = 1; + else + bio_based = 1; + } + if (bio_based) { /* We must use this table as bio-based */ t->type = DM_TYPE_BIO_BASED; diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c index 37ba5db71cd9..242e3cec397a 100644 --- a/drivers/md/dm-target.c +++ b/drivers/md/dm-target.c @@ -131,12 +131,19 @@ static int io_err_map(struct dm_target *tt, struct bio *bio) return -EIO; } +static int io_err_map_rq(struct dm_target *ti, struct request *clone, + union map_info *map_context) +{ + return -EIO; +} + static struct target_type error_target = { .name = "error", - .version = {1, 1, 0}, + .version = {1, 2, 0}, .ctr = io_err_ctr, .dtr = io_err_dtr, .map = io_err_map, + .map_rq = io_err_map_rq, }; int __init dm_target_init(void) diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 88f2f802d528..2c0cf511ec23 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -887,7 +887,8 @@ static int commit(struct pool *pool) r = dm_pool_commit_metadata(pool->pmd); if (r) - DMERR_LIMIT("commit failed: error = %d", r); + DMERR_LIMIT("%s: commit failed: error = %d", + dm_device_name(pool->pool_md), r); return r; } @@ -917,6 +918,13 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) unsigned long flags; struct pool *pool = tc->pool; + /* + * Once no_free_space is set we must not allow allocation to succeed. + * Otherwise it is difficult to explain, debug, test and support. + */ + if (pool->no_free_space) + return -ENOSPC; + r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); if (r) return r; @@ -931,31 +939,30 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) } if (!free_blocks) { - if (pool->no_free_space) - return -ENOSPC; - else { - /* - * Try to commit to see if that will free up some - * more space. - */ - (void) commit_or_fallback(pool); + /* + * Try to commit to see if that will free up some + * more space. + */ + (void) commit_or_fallback(pool); - r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); - if (r) - return r; + r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); + if (r) + return r; - /* - * If we still have no space we set a flag to avoid - * doing all this checking and return -ENOSPC. - */ - if (!free_blocks) { - DMWARN("%s: no free space available.", - dm_device_name(pool->pool_md)); - spin_lock_irqsave(&pool->lock, flags); - pool->no_free_space = 1; - spin_unlock_irqrestore(&pool->lock, flags); - return -ENOSPC; - } + /* + * If we still have no space we set a flag to avoid + * doing all this checking and return -ENOSPC. This + * flag serves as a latch that disallows allocations from + * this pool until the admin takes action (e.g. resize or + * table reload). 
+ */ + if (!free_blocks) { + DMWARN("%s: no free space available.", + dm_device_name(pool->pool_md)); + spin_lock_irqsave(&pool->lock, flags); + pool->no_free_space = 1; + spin_unlock_irqrestore(&pool->lock, flags); + return -ENOSPC; } } @@ -1085,6 +1092,7 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, { int r; dm_block_t data_block; + struct pool *pool = tc->pool; r = alloc_data_block(tc, &data_block); switch (r) { @@ -1094,13 +1102,14 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, break; case -ENOSPC: - no_space(tc->pool, cell); + no_space(pool, cell); break; default: DMERR_LIMIT("%s: alloc_data_block() failed: error = %d", __func__, r); - cell_error(tc->pool, cell); + set_pool_mode(pool, PM_READ_ONLY); + cell_error(pool, cell); break; } } @@ -1386,7 +1395,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode) switch (mode) { case PM_FAIL: - DMERR("switching pool to failure mode"); + DMERR("%s: switching pool to failure mode", + dm_device_name(pool->pool_md)); pool->process_bio = process_bio_fail; pool->process_discard = process_bio_fail; pool->process_prepared_mapping = process_prepared_mapping_fail; @@ -1394,10 +1404,12 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode) break; case PM_READ_ONLY: - DMERR("switching pool to read-only mode"); + DMERR("%s: switching pool to read-only mode", + dm_device_name(pool->pool_md)); r = dm_pool_abort_metadata(pool->pmd); if (r) { - DMERR("aborting transaction failed"); + DMERR("%s: aborting transaction failed", + dm_device_name(pool->pool_md)); set_pool_mode(pool, PM_FAIL); } else { dm_pool_metadata_read_only(pool->pmd); @@ -2083,6 +2095,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) * them down to the data device. The thin device's discard * processing will cause mappings to be removed from the btree. */ + ti->discard_zeroes_data_unsupported = true; if (pf.discard_enabled && pf.discard_passdown) { ti->num_discard_bios = 1; @@ -2092,7 +2105,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) * thin devices' discard limits consistent). 
*/ ti->discards_supported = true; - ti->discard_zeroes_data_unsupported = true; } ti->private = pt; @@ -2156,19 +2168,22 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit) r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size); if (r) { - DMERR("failed to retrieve data device size"); + DMERR("%s: failed to retrieve data device size", + dm_device_name(pool->pool_md)); return r; } if (data_size < sb_data_size) { - DMERR("pool target (%llu blocks) too small: expected %llu", + DMERR("%s: pool target (%llu blocks) too small: expected %llu", + dm_device_name(pool->pool_md), (unsigned long long)data_size, sb_data_size); return -EINVAL; } else if (data_size > sb_data_size) { r = dm_pool_resize_data_dev(pool->pmd, data_size); if (r) { - DMERR("failed to resize data device"); + DMERR("%s: failed to resize data device", + dm_device_name(pool->pool_md)); set_pool_mode(pool, PM_READ_ONLY); return r; } @@ -2192,19 +2207,22 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); if (r) { - DMERR("failed to retrieve data device size"); + DMERR("%s: failed to retrieve metadata device size", + dm_device_name(pool->pool_md)); return r; } if (metadata_dev_size < sb_metadata_dev_size) { - DMERR("metadata device (%llu blocks) too small: expected %llu", + DMERR("%s: metadata device (%llu blocks) too small: expected %llu", + dm_device_name(pool->pool_md), metadata_dev_size, sb_metadata_dev_size); return -EINVAL; } else if (metadata_dev_size > sb_metadata_dev_size) { r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); if (r) { - DMERR("failed to resize metadata device"); + DMERR("%s: failed to resize metadata device", + dm_device_name(pool->pool_md)); return r; } @@ -2530,37 +2548,43 @@ static void pool_status(struct dm_target *ti, status_type_t type, r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); if (r) { - DMERR("dm_pool_get_metadata_transaction_id returned %d", r); + DMERR("%s: dm_pool_get_metadata_transaction_id returned %d", + dm_device_name(pool->pool_md), r); goto err; } r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata); if (r) { - DMERR("dm_pool_get_free_metadata_block_count returned %d", r); + DMERR("%s: dm_pool_get_free_metadata_block_count returned %d", + dm_device_name(pool->pool_md), r); goto err; } r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata); if (r) { - DMERR("dm_pool_get_metadata_dev_size returned %d", r); + DMERR("%s: dm_pool_get_metadata_dev_size returned %d", + dm_device_name(pool->pool_md), r); goto err; } r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data); if (r) { - DMERR("dm_pool_get_free_block_count returned %d", r); + DMERR("%s: dm_pool_get_free_block_count returned %d", + dm_device_name(pool->pool_md), r); goto err; } r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data); if (r) { - DMERR("dm_pool_get_data_dev_size returned %d", r); + DMERR("%s: dm_pool_get_data_dev_size returned %d", + dm_device_name(pool->pool_md), r); goto err; } r = dm_pool_get_metadata_snap(pool->pmd, &held_root); if (r) { - DMERR("dm_pool_get_metadata_snap returned %d", r); + DMERR("%s: dm_pool_get_metadata_snap returned %d", + dm_device_name(pool->pool_md), r); goto err; } @@ -2648,17 +2672,33 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct pool_c *pt = ti->private; struct pool *pool = pt->pool; + uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT; - 
blk_limits_io_min(limits, 0); - blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); + /* + * If the system-determined stacked limits are compatible with the + * pool's blocksize (io_opt is a factor) do not override them. + */ + if (io_opt_sectors < pool->sectors_per_block || + do_div(io_opt_sectors, pool->sectors_per_block)) { + blk_limits_io_min(limits, 0); + blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); + } /* * pt->adjusted_pf is a staging area for the actual features to use. * They get transferred to the live pool in bind_control_target() * called from pool_preresume(). */ - if (!pt->adjusted_pf.discard_enabled) + if (!pt->adjusted_pf.discard_enabled) { + /* + * Must explicitly disallow stacking discard limits otherwise the + * block layer will stack them if pool's data device has support. + * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the + * user to see that, so make sure to set all discard limits to 0. + */ + limits->discard_granularity = 0; return; + } disable_passdown_if_not_supported(pt); @@ -2669,7 +2709,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 8, 0}, + .version = {1, 9, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -2794,10 +2834,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook); /* In case the pool supports discards, pass them on. */ + ti->discard_zeroes_data_unsupported = true; if (tc->pool->pf.discard_enabled) { ti->discards_supported = true; ti->num_discard_bios = 1; - ti->discard_zeroes_data_unsupported = true; /* Discard bios must be split on a block boundary */ ti->split_discard_bios = true; } @@ -2956,7 +2996,7 @@ static int thin_iterate_devices(struct dm_target *ti, static struct target_type thin_target = { .name = "thin", - .version = {1, 8, 0}, + .version = {1, 9, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 9e39d2b64bf8..b3e26c7d1417 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -60,6 +60,7 @@ struct dm_io { struct bio *bio; unsigned long start_time; spinlock_t endio_lock; + struct dm_stats_aux stats_aux; }; /* @@ -198,6 +199,8 @@ struct mapped_device { /* zero-length flush that will be cloned and submitted to targets */ struct bio flush_bio; + + struct dm_stats stats; }; /* @@ -208,10 +211,55 @@ struct dm_md_mempools { struct bio_set *bs; }; -#define MIN_IOS 256 +#define RESERVED_BIO_BASED_IOS 16 +#define RESERVED_REQUEST_BASED_IOS 256 +#define RESERVED_MAX_IOS 1024 static struct kmem_cache *_io_cache; static struct kmem_cache *_rq_tio_cache; +/* + * Bio-based DM's mempools' reserved IOs set by the user. + */ +static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; + +/* + * Request-based DM's mempools' reserved IOs set by the user. 
+ */ +static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS; + +static unsigned __dm_get_reserved_ios(unsigned *reserved_ios, + unsigned def, unsigned max) +{ + unsigned ios = ACCESS_ONCE(*reserved_ios); + unsigned modified_ios = 0; + + if (!ios) + modified_ios = def; + else if (ios > max) + modified_ios = max; + + if (modified_ios) { + (void)cmpxchg(reserved_ios, ios, modified_ios); + ios = modified_ios; + } + + return ios; +} + +unsigned dm_get_reserved_bio_based_ios(void) +{ + return __dm_get_reserved_ios(&reserved_bio_based_ios, + RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS); +} +EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); + +unsigned dm_get_reserved_rq_based_ios(void) +{ + return __dm_get_reserved_ios(&reserved_rq_based_ios, + RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS); +} +EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios); + static int __init local_init(void) { int r = -ENOMEM; @@ -269,6 +317,7 @@ static int (*_inits[])(void) __initdata = { dm_io_init, dm_kcopyd_init, dm_interface_init, + dm_statistics_init, }; static void (*_exits[])(void) = { @@ -279,6 +328,7 @@ static void (*_exits[])(void) = { dm_io_exit, dm_kcopyd_exit, dm_interface_exit, + dm_statistics_exit, }; static int __init dm_init(void) @@ -384,6 +434,16 @@ int dm_lock_for_deletion(struct mapped_device *md) return r; } +sector_t dm_get_size(struct mapped_device *md) +{ + return get_capacity(md->disk); +} + +struct dm_stats *dm_get_stats(struct mapped_device *md) +{ + return &md->stats; +} + static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct mapped_device *md = bdev->bd_disk->private_data; @@ -466,8 +526,9 @@ static int md_in_flight(struct mapped_device *md) static void start_io_acct(struct dm_io *io) { struct mapped_device *md = io->md; + struct bio *bio = io->bio; int cpu; - int rw = bio_data_dir(io->bio); + int rw = bio_data_dir(bio); io->start_time = jiffies; @@ -476,6 +537,10 @@ static void start_io_acct(struct dm_io *io) part_stat_unlock(); atomic_set(&dm_disk(md)->part0.in_flight[rw], atomic_inc_return(&md->pending[rw])); + + if (unlikely(dm_stats_used(&md->stats))) + dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, + bio_sectors(bio), false, 0, &io->stats_aux); } static void end_io_acct(struct dm_io *io) @@ -491,6 +556,10 @@ static void end_io_acct(struct dm_io *io) part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration); part_stat_unlock(); + if (unlikely(dm_stats_used(&md->stats))) + dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, + bio_sectors(bio), true, duration, &io->stats_aux); + /* * After this is decremented the bio must not be touched if it is * a flush. 
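The __dm_get_reserved_ios() helper added in the hunk above validates a user-writable module parameter without taking a lock: it snapshots the value once, computes a clamped replacement, and publishes the correction with a single cmpxchg() whose result is deliberately ignored, because losing the race is harmless. Below is a minimal user-space sketch of the same read-fix-publish pattern, using the GCC/Clang __atomic builtins in place of the kernel's ACCESS_ONCE() and cmpxchg(); the harness, helper names and limit values are illustrative stand-ins, not part of the patch.

#include <stdio.h>

#define RESERVED_DEF	16	/* stand-in for RESERVED_BIO_BASED_IOS */
#define RESERVED_MAX	1024	/* stand-in for RESERVED_MAX_IOS */

/* A writable "module parameter"; 0 means "use the default". */
static unsigned int reserved_ios;

static unsigned int get_reserved_ios(void)
{
	/* one racy snapshot, like ACCESS_ONCE(*reserved_ios) */
	unsigned int ios = __atomic_load_n(&reserved_ios, __ATOMIC_RELAXED);
	unsigned int modified_ios = 0;

	if (!ios)
		modified_ios = RESERVED_DEF;	/* fall back to the default */
	else if (ios > RESERVED_MAX)
		modified_ios = RESERVED_MAX;	/* clamp runaway values */

	if (modified_ios) {
		/*
		 * Best-effort write-back, like the kernel's (void)cmpxchg():
		 * if another thread changed the value meanwhile, the exchange
		 * fails and we simply keep our own corrected copy.
		 */
		__atomic_compare_exchange_n(&reserved_ios, &ios, modified_ios,
					    0, __ATOMIC_RELAXED,
					    __ATOMIC_RELAXED);
		ios = modified_ios;
	}
	return ios;
}

int main(void)
{
	printf("%u\n", get_reserved_ios());	/* 16: default applied */
	reserved_ios = 4096;
	printf("%u\n", get_reserved_ios());	/* 1024: clamped */
	return 0;
}

Which thread wins the cmpxchg does not matter: every caller returns an already-sanitized value, so a bogus concurrently-written parameter can never reach the mempool sizing code.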
@@ -1519,7 +1588,7 @@ static void _dm_request(struct request_queue *q, struct bio *bio) return; } -static int dm_request_based(struct mapped_device *md) +int dm_request_based(struct mapped_device *md) { return blk_queue_stackable(md->queue); } @@ -1946,8 +2015,7 @@ static struct mapped_device *alloc_dev(int minor) add_disk(md->disk); format_dev_t(md->name, MKDEV(_major, minor)); - md->wq = alloc_workqueue("kdmflush", - WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0); + md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); if (!md->wq) goto bad_thread; @@ -1959,6 +2027,8 @@ static struct mapped_device *alloc_dev(int minor) md->flush_bio.bi_bdev = md->bdev; md->flush_bio.bi_rw = WRITE_FLUSH; + dm_stats_init(&md->stats); + /* Populate the mapping, nobody knows we exist yet */ spin_lock(&_minor_lock); old_md = idr_replace(&_minor_idr, md, minor); @@ -2010,6 +2080,7 @@ static void free_dev(struct mapped_device *md) put_disk(md->disk); blk_cleanup_queue(md->queue); + dm_stats_cleanup(&md->stats); module_put(THIS_MODULE); kfree(md); } @@ -2151,7 +2222,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, /* * Wipe any geometry if the size of the table changed. */ - if (size != get_capacity(md->disk)) + if (size != dm_get_size(md)) memset(&md->geometry, 0, sizeof(md->geometry)); __set_size(md, size); @@ -2236,11 +2307,13 @@ void dm_unlock_md_type(struct mapped_device *md) void dm_set_md_type(struct mapped_device *md, unsigned type) { + BUG_ON(!mutex_is_locked(&md->type_lock)); md->type = type; } unsigned dm_get_md_type(struct mapped_device *md) { + BUG_ON(!mutex_is_locked(&md->type_lock)); return md->type; } @@ -2250,6 +2323,17 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md) } /* + * The queue_limits are only valid as long as you have a reference + * count on 'md'. + */ +struct queue_limits *dm_get_queue_limits(struct mapped_device *md) +{ + BUG_ON(!atomic_read(&md->holders)); + return &md->queue->limits; +} +EXPORT_SYMBOL_GPL(dm_get_queue_limits); + +/* * Fully initialize a request-based queue (->elevator, ->request_fn, etc). */ static int dm_init_request_based_queue(struct mapped_device *md) @@ -2695,6 +2779,38 @@ out: return r; } +/* + * Internal suspend/resume works like userspace-driven suspend. It waits + * until all bios finish and prevents issuing new bios to the target drivers. + * It may be used only from the kernel. + * + * Internal suspend holds md->suspend_lock, which prevents interaction with + * userspace-driven suspend. + */ + +void dm_internal_suspend(struct mapped_device *md) +{ + mutex_lock(&md->suspend_lock); + if (dm_suspended_md(md)) + return; + + set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); + synchronize_srcu(&md->io_barrier); + flush_workqueue(md->wq); + dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); +} + +void dm_internal_resume(struct mapped_device *md) +{ + if (dm_suspended_md(md)) + goto done; + + dm_queue_flush(md); + +done: + mutex_unlock(&md->suspend_lock); +} + /*----------------------------------------------------------------- * Event notification. 
*---------------------------------------------------------------*/ @@ -2802,18 +2918,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u if (type == DM_TYPE_BIO_BASED) { cachep = _io_cache; - pool_size = 16; + pool_size = dm_get_reserved_bio_based_ios(); front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); } else if (type == DM_TYPE_REQUEST_BASED) { cachep = _rq_tio_cache; - pool_size = MIN_IOS; + pool_size = dm_get_reserved_rq_based_ios(); front_pad = offsetof(struct dm_rq_clone_bio_info, clone); /* per_bio_data_size is not used. See __bind_mempools(). */ WARN_ON(per_bio_data_size != 0); } else goto out; - pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep); + pools->io_pool = mempool_create_slab_pool(pool_size, cachep); if (!pools->io_pool) goto out; @@ -2864,6 +2980,13 @@ module_exit(dm_exit); module_param(major, uint, 0); MODULE_PARM_DESC(major, "The major number of the device mapper"); + +module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); + +module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools"); + MODULE_DESCRIPTION(DM_NAME " driver"); MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 45b97da1bd06..1d1ad7b7e527 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -16,6 +16,8 @@ #include <linux/blkdev.h> #include <linux/hdreg.h> +#include "dm-stats.h" + /* * Suspend feature flags */ @@ -89,10 +91,21 @@ int dm_setup_md_queue(struct mapped_device *md); #define dm_target_is_valid(t) ((t)->table) /* + * To check whether the target type is bio-based or not (request-based). + */ +#define dm_target_bio_based(t) ((t)->type->map != NULL) + +/* * To check whether the target type is request-based or not (bio-based). */ #define dm_target_request_based(t) ((t)->type->map_rq != NULL) +/* + * To check whether the target type is a hybrid (capable of being + * either request-based or bio-based). + */ +#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t)) + /*----------------------------------------------------------------- * A registry of target types. 
*---------------------------------------------------------------*/ @@ -146,10 +159,16 @@ void dm_destroy(struct mapped_device *md); void dm_destroy_immediate(struct mapped_device *md); int dm_open_count(struct mapped_device *md); int dm_lock_for_deletion(struct mapped_device *md); +int dm_request_based(struct mapped_device *md); +sector_t dm_get_size(struct mapped_device *md); +struct dm_stats *dm_get_stats(struct mapped_device *md); int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, unsigned cookie); +void dm_internal_suspend(struct mapped_device *md); +void dm_internal_resume(struct mapped_device *md); + int dm_io_init(void); void dm_io_exit(void); @@ -162,4 +181,15 @@ void dm_kcopyd_exit(void); struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size); void dm_free_md_mempools(struct dm_md_mempools *pools); +/* + * Helpers that are used by DM core + */ +unsigned dm_get_reserved_bio_based_ios(void); +unsigned dm_get_reserved_rq_based_ios(void); + +static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen) +{ + return !maxlen || strlen(result) + 1 >= maxlen; +} + #endif diff --git a/drivers/md/md.c b/drivers/md/md.c index 9f13e13506ef..adf4d7e1d5e1 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1180,7 +1180,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) mddev->bitmap_info.offset = mddev->bitmap_info.default_offset; mddev->bitmap_info.space = - mddev->bitmap_info.space; + mddev->bitmap_info.default_space; } } else if (mddev->pers == NULL) { @@ -3429,7 +3429,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) mddev->safemode_delay = (msec*HZ)/1000; if (mddev->safemode_delay == 0) mddev->safemode_delay = 1; - if (mddev->safemode_delay < old_delay) + if (mddev->safemode_delay < old_delay || old_delay == 0) md_safemode_timeout((unsigned long)mddev); } return len; @@ -5144,7 +5144,7 @@ int md_run(struct mddev *mddev) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - if (mddev->flags) + if (mddev->flags & MD_UPDATE_SB_FLAGS) md_update_sb(mddev, 0); md_new_event(mddev); @@ -5289,7 +5289,7 @@ static void __md_stop_writes(struct mddev *mddev) md_super_wait(mddev); if (mddev->ro == 0 && - (!mddev->in_sync || mddev->flags)) { + (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) { /* mark array as shutdown cleanly */ mddev->in_sync = 1; md_update_sb(mddev, 1); @@ -5337,8 +5337,14 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) err = -EBUSY; goto out; } - if (bdev) - sync_blockdev(bdev); + if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) { + /* Someone opened the device since we flushed it + * so page cache could be dirty and it is too late + * to flush. So abort + */ + mutex_unlock(&mddev->open_mutex); + return -EBUSY; + } if (mddev->pers) { __md_stop_writes(mddev); @@ -5373,14 +5379,14 @@ static int do_md_stop(struct mddev * mddev, int mode, mutex_unlock(&mddev->open_mutex); return -EBUSY; } - if (bdev) - /* It is possible IO was issued on some other - * open file which was closed before we took ->open_mutex. - * As that was not the last close __blkdev_put will not - * have called sync_blockdev, so we must. + if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) { + /* Someone opened the device since we flushed it + * so page cache could be dirty and it is too late + * to flush. 
So abort */ - sync_blockdev(bdev); - + mutex_unlock(&mddev->open_mutex); + return -EBUSY; + } if (mddev->pers) { if (mddev->ro) set_disk_ro(disk, 0); @@ -5628,10 +5634,7 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg) char *ptr, *buf = NULL; int err = -ENOMEM; - if (md_allow_write(mddev)) - file = kmalloc(sizeof(*file), GFP_NOIO); - else - file = kmalloc(sizeof(*file), GFP_KERNEL); + file = kmalloc(sizeof(*file), GFP_NOIO); if (!file) goto out; @@ -6420,6 +6423,20 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, !test_bit(MD_RECOVERY_NEEDED, &mddev->flags), msecs_to_jiffies(5000)); + if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { + /* Need to flush page cache, and ensure no-one else opens + * and writes + */ + mutex_lock(&mddev->open_mutex); + if (atomic_read(&mddev->openers) > 1) { + mutex_unlock(&mddev->open_mutex); + err = -EBUSY; + goto abort; + } + set_bit(MD_STILL_CLOSED, &mddev->flags); + mutex_unlock(&mddev->open_mutex); + sync_blockdev(bdev); + } err = mddev_lock(mddev); if (err) { printk(KERN_INFO @@ -6673,6 +6690,7 @@ static int md_open(struct block_device *bdev, fmode_t mode) err = 0; atomic_inc(&mddev->openers); + clear_bit(MD_STILL_CLOSED, &mddev->flags); mutex_unlock(&mddev->open_mutex); check_disk_change(bdev); @@ -7817,7 +7835,7 @@ void md_check_recovery(struct mddev *mddev) sysfs_notify_dirent_safe(mddev->sysfs_state); } - if (mddev->flags) + if (mddev->flags & MD_UPDATE_SB_FLAGS) md_update_sb(mddev, 0); if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && diff --git a/drivers/md/md.h b/drivers/md/md.h index 20f02c0b5f2d..608050c43f17 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -204,12 +204,16 @@ struct mddev { struct md_personality *pers; dev_t unit; int md_minor; - struct list_head disks; + struct list_head disks; unsigned long flags; #define MD_CHANGE_DEVS 0 /* Some device status has changed */ #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ #define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */ +#define MD_UPDATE_SB_FLAGS (1 | 2 | 4) /* If these are set, md_update_sb needed */ #define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */ +#define MD_STILL_CLOSED 4 /* If set, then array has not been opened since + * md_ioctl checked on it. 
+ */ int suspended; atomic_t active_io; @@ -218,7 +222,7 @@ struct mddev { * are happening, so run/ * takeover/stop are not safe */ - int ready; /* See when safe to pass + int ready; /* See when safe to pass * IO requests down */ struct gendisk *gendisk; diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index 81b513890e2b..a7e8bf296388 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -615,6 +615,11 @@ int dm_bm_flush_and_unlock(struct dm_block_manager *bm, } EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock); +void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b) +{ + dm_bufio_prefetch(bm->bufio, b, 1); +} + void dm_bm_set_read_only(struct dm_block_manager *bm) { bm->read_only = true; diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h index be5bff61be28..9a82083a66b6 100644 --- a/drivers/md/persistent-data/dm-block-manager.h +++ b/drivers/md/persistent-data/dm-block-manager.h @@ -108,6 +108,11 @@ int dm_bm_unlock(struct dm_block *b); int dm_bm_flush_and_unlock(struct dm_block_manager *bm, struct dm_block *superblock); + /* + * Request data be prefetched into the cache. + */ +void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b); + /* * Switches the bm to a read only mode. Once read-only mode * has been entered the following functions will return -EPERM. diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index 35865425e4b4..468e371ee9b2 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -161,6 +161,7 @@ struct frame { }; struct del_stack { + struct dm_btree_info *info; struct dm_transaction_manager *tm; int top; struct frame spine[MAX_SPINE_DEPTH]; @@ -183,6 +184,20 @@ static int unprocessed_frames(struct del_stack *s) return s->top >= 0; } +static void prefetch_children(struct del_stack *s, struct frame *f) +{ + unsigned i; + struct dm_block_manager *bm = dm_tm_get_bm(s->tm); + + for (i = 0; i < f->nr_children; i++) + dm_bm_prefetch(bm, value64(f->n, i)); +} + +static bool is_internal_level(struct dm_btree_info *info, struct frame *f) +{ + return f->level < (info->levels - 1); +} + static int push_frame(struct del_stack *s, dm_block_t b, unsigned level) { int r; @@ -205,6 +220,7 @@ static int push_frame(struct del_stack *s, dm_block_t b, unsigned level) dm_tm_dec(s->tm, b); else { + uint32_t flags; struct frame *f = s->spine + ++s->top; r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b); @@ -217,6 +233,10 @@ static int push_frame(struct del_stack *s, dm_block_t b, unsigned level) f->level = level; f->nr_children = le32_to_cpu(f->n->header.nr_entries); f->current_child = 0; + + flags = le32_to_cpu(f->n->header.flags); + if (flags & INTERNAL_NODE || is_internal_level(s->info, f)) + prefetch_children(s, f); } return 0; @@ -230,11 +250,6 @@ static void pop_frame(struct del_stack *s) dm_tm_unlock(s->tm, f->b); } -static bool is_internal_level(struct dm_btree_info *info, struct frame *f) -{ - return f->level < (info->levels - 1); -} - int dm_btree_del(struct dm_btree_info *info, dm_block_t root) { int r; @@ -243,6 +258,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root) s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; + s->info = info; s->tm = info->tm; s->top = -1; @@ -287,7 +303,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root) 
info->value_type.dec(info->value_type.context, value_ptr(f->n, i)); } - f->current_child = f->nr_children; + pop_frame(s); } } diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index 3e7a88d99eb0..6058569fe86c 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c @@ -292,16 +292,11 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result) return dm_tm_unlock(ll->tm, blk); } -int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result) +static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b, + uint32_t *result) { __le32 le_rc; - int r = sm_ll_lookup_bitmap(ll, b, result); - - if (r) - return r; - - if (*result != 3) - return r; + int r; r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc); if (r < 0) @@ -312,6 +307,19 @@ int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result) return r; } +int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result) +{ + int r = sm_ll_lookup_bitmap(ll, b, result); + + if (r) + return r; + + if (*result != 3) + return r; + + return sm_ll_lookup_big_ref_count(ll, b, result); +} + int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, dm_block_t end, dm_block_t *result) { @@ -372,11 +380,12 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, return -ENOSPC; } -int sm_ll_insert(struct ll_disk *ll, dm_block_t b, - uint32_t ref_count, enum allocation_event *ev) +static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, + uint32_t (*mutator)(void *context, uint32_t old), + void *context, enum allocation_event *ev) { int r; - uint32_t bit, old; + uint32_t bit, old, ref_count; struct dm_block *nb; dm_block_t index = b; struct disk_index_entry ie_disk; @@ -399,6 +408,14 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b, bm_le = dm_bitmap_data(nb); old = sm_lookup_bitmap(bm_le, bit); + if (old > 2) { + r = sm_ll_lookup_big_ref_count(ll, b, &old); + if (r < 0) + return r; + } + + ref_count = mutator(context, old); + if (ref_count <= 2) { sm_set_bitmap(bm_le, bit, ref_count); @@ -448,31 +465,35 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b, return ll->save_ie(ll, index, &ie_disk); } -int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) +static uint32_t set_ref_count(void *context, uint32_t old) { - int r; - uint32_t rc; - - r = sm_ll_lookup(ll, b, &rc); - if (r) - return r; + return *((uint32_t *) context); +} - return sm_ll_insert(ll, b, rc + 1, ev); +int sm_ll_insert(struct ll_disk *ll, dm_block_t b, + uint32_t ref_count, enum allocation_event *ev) +{ + return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev); } -int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) +static uint32_t inc_ref_count(void *context, uint32_t old) { - int r; - uint32_t rc; + return old + 1; +} - r = sm_ll_lookup(ll, b, &rc); - if (r) - return r; +int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) +{ + return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev); +} - if (!rc) - return -EINVAL; +static uint32_t dec_ref_count(void *context, uint32_t old) +{ + return old - 1; +} - return sm_ll_insert(ll, b, rc - 1, ev); +int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) +{ + return sm_ll_mutate(ll, b, dec_ref_count, NULL, ev); } int sm_ll_commit(struct ll_disk *ll) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 78ea44336e75..7ff4f252ca1a 100644 --- 
a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -53,6 +53,7 @@ #include <linux/cpu.h> #include <linux/slab.h> #include <linux/ratelimit.h> +#include <linux/nodemask.h> #include <trace/events/block.h> #include "md.h" @@ -60,6 +61,10 @@ #include "raid0.h" #include "bitmap.h" +#define cpu_to_group(cpu) cpu_to_node(cpu) +#define ANY_GROUP NUMA_NO_NODE + +static struct workqueue_struct *raid5_wq; /* * Stripe cache */ @@ -72,6 +77,7 @@ #define BYPASS_THRESHOLD 1 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) #define HASH_MASK (NR_HASH - 1) +#define MAX_STRIPE_BATCH 8 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) { @@ -200,6 +206,49 @@ static int stripe_operations_active(struct stripe_head *sh) test_bit(STRIPE_COMPUTE_RUN, &sh->state); } +static void raid5_wakeup_stripe_thread(struct stripe_head *sh) +{ + struct r5conf *conf = sh->raid_conf; + struct r5worker_group *group; + int thread_cnt; + int i, cpu = sh->cpu; + + if (!cpu_online(cpu)) { + cpu = cpumask_any(cpu_online_mask); + sh->cpu = cpu; + } + + if (list_empty(&sh->lru)) { + struct r5worker_group *group; + group = conf->worker_groups + cpu_to_group(cpu); + list_add_tail(&sh->lru, &group->handle_list); + group->stripes_cnt++; + sh->group = group; + } + + if (conf->worker_cnt_per_group == 0) { + md_wakeup_thread(conf->mddev->thread); + return; + } + + group = conf->worker_groups + cpu_to_group(sh->cpu); + + group->workers[0].working = true; + /* at least one worker should run to avoid race */ + queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); + + thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1; + /* wake up more workers */ + for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) { + if (group->workers[i].working == false) { + group->workers[i].working = true; + queue_work_on(sh->cpu, raid5_wq, + &group->workers[i].work); + thread_cnt--; + } + } +} + static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) { BUG_ON(!list_empty(&sh->lru)); @@ -214,7 +263,12 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) else { clear_bit(STRIPE_DELAYED, &sh->state); clear_bit(STRIPE_BIT_DELAY, &sh->state); - list_add_tail(&sh->lru, &conf->handle_list); + if (conf->worker_cnt_per_group == 0) { + list_add_tail(&sh->lru, &conf->handle_list); + } else { + raid5_wakeup_stripe_thread(sh); + return; + } } md_wakeup_thread(conf->mddev->thread); } else { @@ -239,12 +293,62 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) do_release_stripe(conf, sh); } +static struct llist_node *llist_reverse_order(struct llist_node *head) +{ + struct llist_node *new_head = NULL; + + while (head) { + struct llist_node *tmp = head; + head = head->next; + tmp->next = new_head; + new_head = tmp; + } + + return new_head; +} + +/* should hold conf->device_lock already */ +static int release_stripe_list(struct r5conf *conf) +{ + struct stripe_head *sh; + int count = 0; + struct llist_node *head; + + head = llist_del_all(&conf->released_stripes); + head = llist_reverse_order(head); + while (head) { + sh = llist_entry(head, struct stripe_head, release_list); + head = llist_next(head); + /* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */ + smp_mb(); + clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state); + /* + * Don't worry if the bit is set here, because if the bit is set + * again, the count is always > 1. This is true for + * STRIPE_ON_UNPLUG_LIST bit too.
+ */ + __release_stripe(conf, sh); + count++; + } + + return count; +} + static void release_stripe(struct stripe_head *sh) { struct r5conf *conf = sh->raid_conf; unsigned long flags; + bool wakeup; + if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) + goto slow_path; + wakeup = llist_add(&sh->release_list, &conf->released_stripes); + if (wakeup) + md_wakeup_thread(conf->mddev->thread); + return; +slow_path: local_irq_save(flags); + /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */ if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { do_release_stripe(conf, sh); spin_unlock(&conf->device_lock); @@ -359,6 +463,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) raid5_build_block(sh, i, previous); } insert_hash(conf, sh); + sh->cpu = smp_processor_id(); } static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, @@ -491,7 +596,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector, if (atomic_read(&sh->count)) { BUG_ON(!list_empty(&sh->lru) && !test_bit(STRIPE_EXPANDING, &sh->state) - && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)); + && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state) + && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state)); } else { if (!test_bit(STRIPE_HANDLE, &sh->state)) atomic_inc(&conf->active_stripes); @@ -499,6 +605,10 @@ get_active_stripe(struct r5conf *conf, sector_t sector, !test_bit(STRIPE_EXPANDING, &sh->state)) BUG(); list_del_init(&sh->lru); + if (sh->group) { + sh->group->stripes_cnt--; + sh->group = NULL; + } } } } while (sh == NULL); @@ -3779,6 +3889,7 @@ static void raid5_activate_delayed(struct r5conf *conf) if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) atomic_inc(&conf->preread_active_stripes); list_add_tail(&sh->lru, &conf->hold_list); + raid5_wakeup_stripe_thread(sh); } } } @@ -4058,18 +4169,35 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) * head of the hold_list has changed, i.e. the head was promoted to the * handle_list. */ -static struct stripe_head *__get_priority_stripe(struct r5conf *conf) +static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) { - struct stripe_head *sh; + struct stripe_head *sh = NULL, *tmp; + struct list_head *handle_list = NULL; + struct r5worker_group *wg = NULL; + + if (conf->worker_cnt_per_group == 0) { + handle_list = &conf->handle_list; + } else if (group != ANY_GROUP) { + handle_list = &conf->worker_groups[group].handle_list; + wg = &conf->worker_groups[group]; + } else { + int i; + for (i = 0; i < conf->group_cnt; i++) { + handle_list = &conf->worker_groups[i].handle_list; + wg = &conf->worker_groups[i]; + if (!list_empty(handle_list)) + break; + } + } pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", __func__, - list_empty(&conf->handle_list) ? "empty" : "busy", + list_empty(handle_list) ? "empty" : "busy", list_empty(&conf->hold_list) ? 
"empty" : "busy", atomic_read(&conf->pending_full_writes), conf->bypass_count); - if (!list_empty(&conf->handle_list)) { - sh = list_entry(conf->handle_list.next, typeof(*sh), lru); + if (!list_empty(handle_list)) { + sh = list_entry(handle_list->next, typeof(*sh), lru); if (list_empty(&conf->hold_list)) conf->bypass_count = 0; @@ -4087,14 +4215,32 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf) ((conf->bypass_threshold && conf->bypass_count > conf->bypass_threshold) || atomic_read(&conf->pending_full_writes) == 0)) { - sh = list_entry(conf->hold_list.next, - typeof(*sh), lru); - conf->bypass_count -= conf->bypass_threshold; - if (conf->bypass_count < 0) - conf->bypass_count = 0; - } else + + list_for_each_entry(tmp, &conf->hold_list, lru) { + if (conf->worker_cnt_per_group == 0 || + group == ANY_GROUP || + !cpu_online(tmp->cpu) || + cpu_to_group(tmp->cpu) == group) { + sh = tmp; + break; + } + } + + if (sh) { + conf->bypass_count -= conf->bypass_threshold; + if (conf->bypass_count < 0) + conf->bypass_count = 0; + } + wg = NULL; + } + + if (!sh) return NULL; + if (wg) { + wg->stripes_cnt--; + sh->group = NULL; + } list_del_init(&sh->lru); atomic_inc(&sh->count); BUG_ON(atomic_read(&sh->count) != 1); @@ -4127,6 +4273,10 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) */ smp_mb__before_clear_bit(); clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); + /* + * STRIPE_ON_RELEASE_LIST could be set here. In that + * case, the count is always > 1 here + */ __release_stripe(conf, sh); cnt++; } @@ -4286,8 +4436,10 @@ static void make_request(struct mddev *mddev, struct bio * bi) for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { DEFINE_WAIT(w); int previous; + int seq; retry: + seq = read_seqcount_begin(&conf->gen_lock); previous = 0; prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); if (unlikely(conf->reshape_progress != MaxSector)) { @@ -4320,7 +4472,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) previous, &dd_idx, NULL); pr_debug("raid456: make_request, sector %llu logical %llu\n", - (unsigned long long)new_sector, + (unsigned long long)new_sector, (unsigned long long)logical_sector); sh = get_active_stripe(conf, new_sector, previous, @@ -4349,6 +4501,13 @@ static void make_request(struct mddev *mddev, struct bio * bi) goto retry; } } + if (read_seqcount_retry(&conf->gen_lock, seq)) { + /* Might have got the wrong stripe_head + * by accident + */ + release_stripe(sh); + goto retry; + } if (rw == WRITE && logical_sector >= mddev->suspend_lo && @@ -4788,14 +4947,14 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) return handled; } -#define MAX_STRIPE_BATCH 8 -static int handle_active_stripes(struct r5conf *conf) +static int handle_active_stripes(struct r5conf *conf, int group, + struct r5worker *worker) { struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; int i, batch_size = 0; while (batch_size < MAX_STRIPE_BATCH && - (sh = __get_priority_stripe(conf)) != NULL) + (sh = __get_priority_stripe(conf, group)) != NULL) batch[batch_size++] = sh; if (batch_size == 0) @@ -4813,6 +4972,39 @@ static int handle_active_stripes(struct r5conf *conf) return batch_size; } +static void raid5_do_work(struct work_struct *work) +{ + struct r5worker *worker = container_of(work, struct r5worker, work); + struct r5worker_group *group = worker->group; + struct r5conf *conf = group->conf; + int group_id = group - conf->worker_groups; + int handled; + struct blk_plug plug; + + pr_debug("+++ 
raid5worker active\n"); + + blk_start_plug(&plug); + handled = 0; + spin_lock_irq(&conf->device_lock); + while (1) { + int batch_size, released; + + released = release_stripe_list(conf); + + batch_size = handle_active_stripes(conf, group_id, worker); + worker->working = false; + if (!batch_size && !released) + break; + handled += batch_size; + } + pr_debug("%d stripes handled\n", handled); + + spin_unlock_irq(&conf->device_lock); + blk_finish_plug(&plug); + + pr_debug("--- raid5worker inactive\n"); +} + /* * This is our raid5 kernel thread. * @@ -4836,7 +5028,9 @@ static void raid5d(struct md_thread *thread) spin_lock_irq(&conf->device_lock); while (1) { struct bio *bio; - int batch_size; + int batch_size, released; + + released = release_stripe_list(conf); if ( !list_empty(&conf->bitmap_list)) { @@ -4860,8 +5054,8 @@ static void raid5d(struct md_thread *thread) handled++; } - batch_size = handle_active_stripes(conf); - if (!batch_size) + batch_size = handle_active_stripes(conf, ANY_GROUP, NULL); + if (!batch_size && !released) break; handled += batch_size; @@ -4989,10 +5183,70 @@ stripe_cache_active_show(struct mddev *mddev, char *page) static struct md_sysfs_entry raid5_stripecache_active = __ATTR_RO(stripe_cache_active); +static ssize_t +raid5_show_group_thread_cnt(struct mddev *mddev, char *page) +{ + struct r5conf *conf = mddev->private; + if (conf) + return sprintf(page, "%d\n", conf->worker_cnt_per_group); + else + return 0; +} + +static int alloc_thread_groups(struct r5conf *conf, int cnt); +static ssize_t +raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) +{ + struct r5conf *conf = mddev->private; + unsigned long new; + int err; + struct r5worker_group *old_groups; + int old_group_cnt; + + if (len >= PAGE_SIZE) + return -EINVAL; + if (!conf) + return -ENODEV; + + if (kstrtoul(page, 10, &new)) + return -EINVAL; + + if (new == conf->worker_cnt_per_group) + return len; + + mddev_suspend(mddev); + + old_groups = conf->worker_groups; + old_group_cnt = conf->worker_cnt_per_group; + + conf->worker_groups = NULL; + err = alloc_thread_groups(conf, new); + if (err) { + conf->worker_groups = old_groups; + conf->worker_cnt_per_group = old_group_cnt; + } else { + if (old_groups) + kfree(old_groups[0].workers); + kfree(old_groups); + } + + mddev_resume(mddev); + + if (err) + return err; + return len; +} + +static struct md_sysfs_entry +raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR, + raid5_show_group_thread_cnt, + raid5_store_group_thread_cnt); + static struct attribute *raid5_attrs[] = { &raid5_stripecache_size.attr, &raid5_stripecache_active.attr, &raid5_preread_bypass_threshold.attr, + &raid5_group_thread_cnt.attr, NULL, }; static struct attribute_group raid5_attrs_group = { @@ -5000,6 +5254,54 @@ static struct attribute_group raid5_attrs_group = { .attrs = raid5_attrs, }; +static int alloc_thread_groups(struct r5conf *conf, int cnt) +{ + int i, j; + ssize_t size; + struct r5worker *workers; + + conf->worker_cnt_per_group = cnt; + if (cnt == 0) { + conf->worker_groups = NULL; + return 0; + } + conf->group_cnt = num_possible_nodes(); + size = sizeof(struct r5worker) * cnt; + workers = kzalloc(size * conf->group_cnt, GFP_NOIO); + conf->worker_groups = kzalloc(sizeof(struct r5worker_group) * + conf->group_cnt, GFP_NOIO); + if (!conf->worker_groups || !workers) { + kfree(workers); + kfree(conf->worker_groups); + conf->worker_groups = NULL; + return -ENOMEM; + } + + for (i = 0; i < conf->group_cnt; i++) { + struct r5worker_group *group; + + 
group = &conf->worker_groups[i]; + INIT_LIST_HEAD(&group->handle_list); + group->conf = conf; + group->workers = workers + i * cnt; + + for (j = 0; j < cnt; j++) { + group->workers[j].group = group; + INIT_WORK(&group->workers[j].work, raid5_do_work); + } + } + + return 0; +} + +static void free_thread_groups(struct r5conf *conf) +{ + if (conf->worker_groups) + kfree(conf->worker_groups[0].workers); + kfree(conf->worker_groups); + conf->worker_groups = NULL; +} + static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) { @@ -5040,6 +5342,7 @@ static void raid5_free_percpu(struct r5conf *conf) static void free_conf(struct r5conf *conf) { + free_thread_groups(conf); shrink_stripes(conf); raid5_free_percpu(conf); kfree(conf->disks); @@ -5168,7 +5471,11 @@ static struct r5conf *setup_conf(struct mddev *mddev) conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); if (conf == NULL) goto abort; + /* Don't enable multi-threading by default*/ + if (alloc_thread_groups(conf, 0)) + goto abort; spin_lock_init(&conf->device_lock); + seqcount_init(&conf->gen_lock); init_waitqueue_head(&conf->wait_for_stripe); init_waitqueue_head(&conf->wait_for_overlap); INIT_LIST_HEAD(&conf->handle_list); @@ -5176,6 +5483,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) INIT_LIST_HEAD(&conf->delayed_list); INIT_LIST_HEAD(&conf->bitmap_list); INIT_LIST_HEAD(&conf->inactive_list); + init_llist_head(&conf->released_stripes); atomic_set(&conf->active_stripes, 0); atomic_set(&conf->preread_active_stripes, 0); atomic_set(&conf->active_aligned_reads, 0); @@ -5980,6 +6288,7 @@ static int raid5_start_reshape(struct mddev *mddev) atomic_set(&conf->reshape_stripes, 0); spin_lock_irq(&conf->device_lock); + write_seqcount_begin(&conf->gen_lock); conf->previous_raid_disks = conf->raid_disks; conf->raid_disks += mddev->delta_disks; conf->prev_chunk_sectors = conf->chunk_sectors; @@ -5996,8 +6305,16 @@ static int raid5_start_reshape(struct mddev *mddev) else conf->reshape_progress = 0; conf->reshape_safe = conf->reshape_progress; + write_seqcount_end(&conf->gen_lock); spin_unlock_irq(&conf->device_lock); + /* Now make sure any requests that proceeded on the assumption + * the reshape wasn't running - like Discard or Read - have + * completed. + */ + mddev_suspend(mddev); + mddev_resume(mddev); + /* Add some new drives, as many as will fit. * We know there are enough to make the newly sized array work. 
* Don't add devices if we are reducing the number of @@ -6472,6 +6789,10 @@ static struct md_personality raid4_personality = static int __init raid5_init(void) { + raid5_wq = alloc_workqueue("raid5wq", + WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0); + if (!raid5_wq) + return -ENOMEM; register_md_personality(&raid6_personality); register_md_personality(&raid5_personality); register_md_personality(&raid4_personality); @@ -6483,6 +6804,7 @@ static void raid5_exit(void) unregister_md_personality(&raid6_personality); unregister_md_personality(&raid5_personality); unregister_md_personality(&raid4_personality); + destroy_workqueue(raid5_wq); } module_init(raid5_init); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 70c49329ca9a..2113ffa82c7a 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -197,6 +197,7 @@ enum reconstruct_states { struct stripe_head { struct hlist_node hash; struct list_head lru; /* inactive_list or handle_list */ + struct llist_node release_list; struct r5conf *raid_conf; short generation; /* increments with every * reshape */ @@ -211,6 +212,8 @@ struct stripe_head { enum check_states check_state; enum reconstruct_states reconstruct_state; spinlock_t stripe_lock; + int cpu; + struct r5worker_group *group; /** * struct stripe_operations * @target - STRIPE_OP_COMPUTE_BLK target @@ -321,6 +324,7 @@ enum { STRIPE_OPS_REQ_PENDING, STRIPE_ON_UNPLUG_LIST, STRIPE_DISCARD, + STRIPE_ON_RELEASE_LIST, }; /* @@ -363,6 +367,19 @@ struct disk_info { struct md_rdev *rdev, *replacement; }; +struct r5worker { + struct work_struct work; + struct r5worker_group *group; + bool working; +}; + +struct r5worker_group { + struct list_head handle_list; + struct r5conf *conf; + struct r5worker *workers; + int stripes_cnt; +}; + struct r5conf { struct hlist_head *stripe_hashtbl; struct mddev *mddev; @@ -386,6 +403,7 @@ struct r5conf { int prev_chunk_sectors; int prev_algo; short generation; /* increments with every reshape */ + seqcount_t gen_lock; /* lock against generation changes */ unsigned long reshape_checkpoint; /* Time we last updated * metadata */ long long min_offset_diff; /* minimum difference between @@ -445,6 +463,7 @@ struct r5conf { */ atomic_t active_stripes; struct list_head inactive_list; + struct llist_head released_stripes; wait_queue_head_t wait_for_stripe; wait_queue_head_t wait_for_overlap; int inactive_blocked; /* release of inactive stripes blocked, @@ -458,6 +477,9 @@ struct r5conf { * the new thread here until we fully activate the array. */ struct md_thread *thread; + struct r5worker_group *worker_groups; + int group_cnt; + int worker_cnt_per_group; }; /* |
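The released_stripes machinery added in the raid5 hunks above is a single-consumer llist pattern: release_stripe() pushes a stripe with a lock-free llist_add() and wakes the thread, while raid5d()/raid5_do_work() drain the whole chain with one llist_del_all() under conf->device_lock and reverse it so stripes are handled in release order. A compact user-space sketch of that drain pattern follows; the tiny list and stripe types are stand-ins written for illustration, not the kernel's <linux/llist.h>.

#include <stdio.h>
#include <stddef.h>

struct llnode { struct llnode *next; };

/* Lock-free producer: prepend to the chain with a single CAS. */
static void ll_add(struct llnode *n, struct llnode **head)
{
	struct llnode *old = __atomic_load_n(head, __ATOMIC_RELAXED);

	do {
		n->next = old;	/* 'old' is refreshed by a failed CAS */
	} while (!__atomic_compare_exchange_n(head, &old, n, 0,
					      __ATOMIC_RELEASE,
					      __ATOMIC_RELAXED));
}

/* Consumer grabs the entire chain in one atomic exchange. */
static struct llnode *ll_del_all(struct llnode **head)
{
	return __atomic_exchange_n(head, (struct llnode *)0, __ATOMIC_ACQUIRE);
}

/* Pushes build the chain LIFO; reverse it to recover release order. */
static struct llnode *ll_reverse(struct llnode *head)
{
	struct llnode *new_head = NULL;

	while (head) {
		struct llnode *tmp = head;

		head = head->next;
		tmp->next = new_head;
		new_head = tmp;
	}
	return new_head;
}

struct stripe {
	int id;
	struct llnode release_list;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct stripe s[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
	struct llnode *head = NULL, *n;
	int i;

	for (i = 0; i < 3; i++)			/* producers: release_stripe() */
		ll_add(&s[i].release_list, &head);

	/* consumer: one atomic xchg, then walk in FIFO order */
	for (n = ll_reverse(ll_del_all(&head)); n; n = n->next)
		printf("stripe %d\n",
		       container_of(n, struct stripe, release_list)->id);

	return 0;	/* prints stripe 0, 1, 2 */
}

The reversal step matters because pushes prepend: without llist_reverse_order() the consumer would service the most recently released stripe first, starving older ones.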