Diffstat (limited to 'drivers')
-rw-r--r--   drivers/md/bcache/bset.c      |  2
-rw-r--r--   drivers/md/bcache/btree.c     | 81
-rw-r--r--   drivers/md/bcache/btree.h     |  8
-rw-r--r--   drivers/md/bcache/journal.c   |  8
-rw-r--r--   drivers/md/bcache/movinggc.c  |  4
-rw-r--r--   drivers/md/bcache/request.c   | 35
-rw-r--r--   drivers/md/bcache/request.h   |  1
-rw-r--r--   drivers/md/bcache/writeback.c |  5
8 files changed, 63 insertions, 81 deletions
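
In outline (reconstructed from the diff below, not an official changelog): struct btree_op loses its embedded closure. bch_btree_op_init_stack() is replaced by an inline bch_btree_op_init() that takes the write-lock level directly; callers that previously waited on op->cl now allocate a closure on their own stack; and struct search gains a separate `btree` closure that the data-insert and cache-lookup paths run under. A hedged sketch of the resulting idiom follows the diff.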
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index fae5b7b3f5ab..f7b5525ddafa 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -1196,7 +1196,7 @@ int bch_bset_print_stats(struct cache_set *c, char *buf)
 	int ret;
 
 	memset(&t, 0, sizeof(struct bset_stats));
-	bch_btree_op_init_stack(&t.op);
+	bch_btree_op_init(&t.op, -1);
 
 	ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
 	if (ret < 0)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5cb59c313dc3..cb1a490f7f86 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -115,13 +115,6 @@ enum {
 
 static struct workqueue_struct *btree_io_wq;
 
-void bch_btree_op_init_stack(struct btree_op *op)
-{
-	memset(op, 0, sizeof(struct btree_op));
-	closure_init_stack(&op->cl);
-	op->lock = -1;
-}
-
 static inline bool should_split(struct btree *b)
 {
 	struct bset *i = write_block(b);
@@ -965,8 +958,7 @@ err:
  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
  * in from disk if necessary.
  *
- * If IO is necessary, it uses the closure embedded in struct btree_op to wait;
- * if that closure is in non blocking mode, will return -EAGAIN.
+ * If IO is necessary and running under generic_make_request, returns -EAGAIN.
  *
  * The btree node will have either a read or a write lock held, depending on
  * level and op->lock.
@@ -1260,6 +1252,9 @@ static void btree_gc_coalesce(struct btree *b, struct gc_stat *gc,
 {
 	unsigned nodes = 0, keys = 0, blocks;
 	int i;
+	struct closure cl;
+
+	closure_init_stack(&cl);
 
 	while (nodes < GC_MERGE_NODES && r[nodes].b)
 		keys += r[nodes++].keys;
@@ -1353,9 +1348,7 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
 {
 	void write(struct btree *r)
 	{
-		if (!r->written)
-			bch_btree_node_write(r, &op->cl);
-		else if (btree_node_dirty(r))
+		if (!r->written || btree_node_dirty(r))
 			bch_btree_node_write(r, writes);
 
 		up_write(&r->lock);
@@ -1431,6 +1424,9 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
 	struct btree *n = NULL;
 	unsigned keys = 0;
 	int ret = 0, stale = btree_gc_mark_node(b, &keys, gc);
+	struct closure cl;
+
+	closure_init_stack(&cl);
 
 	if (b->level || stale > 10)
 		n = btree_node_alloc_replacement(b);
@@ -1442,11 +1438,11 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
 		ret = btree_gc_recurse(b, op, writes, gc);
 
 	if (!b->written || btree_node_dirty(b)) {
-		bch_btree_node_write(b, n ? &op->cl : NULL);
+		bch_btree_node_write(b, n ? &cl : NULL);
 	}
 
 	if (!IS_ERR_OR_NULL(n)) {
-		closure_sync(&op->cl);
+		closure_sync(&cl);
 		bch_btree_set_root(b);
 		btree_node_free(n);
 		rw_unlock(true, b);
@@ -1545,15 +1541,13 @@ static void bch_btree_gc(struct cache_set *c)
 
 	memset(&stats, 0, sizeof(struct gc_stat));
 	closure_init_stack(&writes);
-	bch_btree_op_init_stack(&op);
-	op.lock = SHRT_MAX;
+	bch_btree_op_init(&op, SHRT_MAX);
 
 	btree_gc_start(c);
 
 	atomic_inc(&c->prio_blocked);
 
 	ret = btree_root(gc_root, c, &op, &writes, &stats);
-	closure_sync(&op.cl);
 	closure_sync(&writes);
 
 	if (ret) {
@@ -1562,8 +1556,8 @@ static void bch_btree_gc(struct cache_set *c)
 	}
 
 	/* Possibly wait for new UUIDs or whatever to hit disk */
-	bch_journal_meta(c, &op.cl);
-	closure_sync(&op.cl);
+	bch_journal_meta(c, &writes);
+	closure_sync(&writes);
 
 	available = bch_btree_gc_finish(c);
 
@@ -1671,8 +1665,7 @@ int bch_btree_check(struct cache_set *c)
 	struct btree_op op;
 
 	memset(seen, 0, sizeof(seen));
-	bch_btree_op_init_stack(&op);
-	op.lock = SHRT_MAX;
+	bch_btree_op_init(&op, SHRT_MAX);
 
 	for (i = 0; c->cache[i]; i++) {
 		size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
@@ -1980,6 +1973,9 @@ static int btree_split(struct btree *b, struct btree_op *op,
 	bool split;
 	struct btree *n1, *n2 = NULL, *n3 = NULL;
 	uint64_t start_time = local_clock();
+	struct closure cl;
+
+	closure_init_stack(&cl);
 
 	n1 = btree_node_alloc_replacement(b);
 	if (IS_ERR(n1))
@@ -2025,7 +2021,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
 		bkey_copy_key(&n2->key, &b->key);
 
 		bch_keylist_add(parent_keys, &n2->key);
-		bch_btree_node_write(n2, &op->cl);
+		bch_btree_node_write(n2, &cl);
 		rw_unlock(true, n2);
 	} else {
 		trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
@@ -2034,23 +2030,23 @@ static int btree_split(struct btree *b, struct btree_op *op,
 	}
 
 	bch_keylist_add(parent_keys, &n1->key);
-	bch_btree_node_write(n1, &op->cl);
+	bch_btree_node_write(n1, &cl);
 
 	if (n3) {
 		/* Depth increases, make a new root */
 
 		bkey_copy_key(&n3->key, &MAX_KEY);
 		bch_btree_insert_keys(n3, op, parent_keys);
-		bch_btree_node_write(n3, &op->cl);
+		bch_btree_node_write(n3, &cl);
 
-		closure_sync(&op->cl);
+		closure_sync(&cl);
 		bch_btree_set_root(n3);
 		rw_unlock(true, n3);
 	} else if (!b->parent) {
 		/* Root filled up but didn't need to be split */
 
 		bch_keylist_reset(parent_keys);
-		closure_sync(&op->cl);
+		closure_sync(&cl);
 		bch_btree_set_root(n1);
 	} else {
 		unsigned i;
@@ -2065,7 +2061,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
 		}
 
 		bch_keylist_push(parent_keys);
-		closure_sync(&op->cl);
+		closure_sync(&cl);
 		atomic_inc(&b->c->prio_blocked);
 	}
 
@@ -2126,10 +2122,15 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
 			BUG_ON(write_block(b) != b->sets[b->nsets].data);
 
 			if (bch_btree_insert_keys(b, op, insert_keys)) {
-				if (!b->level)
+				if (!b->level) {
 					bch_btree_leaf_dirty(b, journal_ref);
-				else
-					bch_btree_node_write(b, &op->cl);
+				} else {
+					struct closure cl;
+
+					closure_init_stack(&cl);
+					bch_btree_node_write(b, &cl);
+					closure_sync(&cl);
+				}
 			}
 		}
 	} while (!bch_keylist_empty(&split_keys));
@@ -2204,12 +2205,6 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
 {
 	int ret = 0;
 
-	/*
-	 * Don't want to block with the btree locked unless we have to,
-	 * otherwise we get deadlocks with try_harder and between split/gc
-	 */
-	clear_closure_blocking(&op->cl);
-
 	BUG_ON(bch_keylist_empty(keys));
 
 	while (!bch_keylist_empty(keys)) {
@@ -2217,8 +2212,8 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
 		ret = btree_root(insert_recurse, c, op, keys, journal_ref);
 
 		if (ret == -EAGAIN) {
+			BUG();
 			ret = 0;
-			closure_sync(&op->cl);
 		} else if (ret) {
 			struct bkey *k;
 
@@ -2292,10 +2287,7 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
 			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
 {
-	int ret = btree_root(map_nodes_recurse, c, op, from, fn, flags);
-	if (closure_blocking(&op->cl))
-		closure_sync(&op->cl);
-	return ret;
+	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
 }
 
 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
@@ -2328,10 +2320,7 @@ static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
 		       struct bkey *from, btree_map_keys_fn *fn, int flags)
 {
-	int ret = btree_root(map_keys_recurse, c, op, from, fn, flags);
-	if (closure_blocking(&op->cl))
-		closure_sync(&op->cl);
-	return ret;
+	return btree_root(map_keys_recurse, c, op, from, fn, flags);
 }
 
 /* Keybuf code */
@@ -2409,7 +2398,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
 
 	cond_resched();
 
-	bch_btree_op_init_stack(&refill.op);
+	bch_btree_op_init(&refill.op, -1);
 	refill.buf = buf;
 	refill.end = end;
 	refill.pred = pred;
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 3f820b67150c..34ee5359b262 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -237,8 +237,6 @@ void __bkey_put(struct cache_set *c, struct bkey *k);
 
 /* Recursing down the btree */
 
 struct btree_op {
-	struct closure cl;
-
 	/* Btree level at which we start taking write locks */
 	short lock;
@@ -253,7 +251,11 @@ struct btree_op {
 	BKEY_PADDED(replace);
 };
 
-void bch_btree_op_init_stack(struct btree_op *);
+static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
+{
+	memset(op, 0, sizeof(struct btree_op));
+	op->lock = write_lock_level;
+}
 
 static inline void rw_lock(bool w, struct btree *b, int level)
 {
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 725c8eb9a62a..20e900ad5010 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -305,8 +305,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
 	struct btree_op op;
 
 	bch_keylist_init(&keylist);
-	bch_btree_op_init_stack(&op);
-	op.lock = SHRT_MAX;
+	bch_btree_op_init(&op, SHRT_MAX);
 
 	list_for_each_entry(i, list, list) {
 		BUG_ON(i->pin && atomic_read(i->pin) != 1);
@@ -341,14 +340,13 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
 
 	pr_info("journal replay done, %i keys in %i entries, seq %llu",
 		keys, entries, end);
-
+err:
 	while (!list_empty(list)) {
 		i = list_first_entry(list, struct journal_replay, list);
 		list_del(&i->list);
 		kfree(i);
 	}
-err:
-	closure_sync(&op.cl);
+
 	return ret;
 }
 
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 80e30d77221e..219356f6159d 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -108,8 +108,8 @@ static void write_moving(struct closure *cl)
 		s->op.type = BTREE_REPLACE;
 		bkey_copy(&s->op.replace, &io->w->key);
 
-		closure_init(&s->op.cl, cl);
-		bch_data_insert(&s->op.cl);
+		closure_init(&s->btree, cl);
+		bch_data_insert(&s->btree);
 	}
 
 	continue_at(cl, write_moving_finish, system_wq);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 818e2e39e71f..5df44fbc9e1d 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -215,8 +215,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
 
 static void bch_data_insert_keys(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	atomic_t *journal_ref = NULL;
 
 	/*
@@ -236,7 +235,7 @@ static void bch_data_insert_keys(struct closure *cl)
 					s->flush_journal
 					? &s->cl : NULL);
 
-	if (bch_btree_insert(op, s->c, &s->insert_keys, journal_ref)) {
+	if (bch_btree_insert(&s->op, s->c, &s->insert_keys, journal_ref)) {
 		s->error		= -ENOMEM;
 		s->insert_data_done	= true;
 	}
@@ -433,8 +432,7 @@ static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
 
 static void bch_data_invalidate(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	struct bio *bio = s->cache_bio;
 
 	pr_debug("invalidating %i sectors from %llu",
@@ -461,8 +459,7 @@ out:
 
 static void bch_data_insert_error(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 
 	/*
 	 * Our data write just errored, which means we've got a bunch of keys to
@@ -493,8 +490,7 @@ static void bch_data_insert_error(struct closure *cl)
 static void bch_data_insert_endio(struct bio *bio, int error)
 {
 	struct closure *cl = bio->bi_private;
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 
 	if (error) {
 		/* TODO: We could try to recover from this. */
@@ -511,8 +507,7 @@ static void bch_data_insert_endio(struct bio *bio, int error)
 
 static void bch_data_insert_start(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	struct bio *bio = s->cache_bio, *n;
 
 	if (s->bypass)
@@ -630,8 +625,7 @@ err:
 */
 void bch_data_insert(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 
 	bch_keylist_init(&s->insert_keys);
 	bio_get(s->cache_bio);
@@ -731,11 +725,10 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 
 static void cache_lookup(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	struct bio *bio = &s->bio.bio;
 
-	int ret = bch_btree_map_keys(op, s->c,
+	int ret = bch_btree_map_keys(&s->op, s->c,
 				     &KEY(s->inode, bio->bi_sector, 0),
 				     cache_lookup_fn, MAP_END_KEY);
 	if (ret == -EAGAIN)
@@ -1064,7 +1057,7 @@ static void cached_dev_read_done(struct closure *cl)
 	if (s->cache_bio &&
 	    !test_bit(CACHE_SET_STOPPING, &s->c->flags)) {
 		s->op.type = BTREE_REPLACE;
-		closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+		closure_call(&s->btree, bch_data_insert, NULL, cl);
 	}
 
 	continue_at(cl, cached_dev_cache_miss_done, NULL);
@@ -1156,7 +1149,7 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 
-	closure_call(&s->op.cl, cache_lookup, NULL, cl);
+	closure_call(&s->btree, cache_lookup, NULL, cl);
 	continue_at(cl, cached_dev_read_done_bh, NULL);
 }
 
@@ -1239,7 +1232,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		closure_bio_submit(bio, cl, s->d);
 	}
 
-	closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+	closure_call(&s->btree, bch_data_insert, NULL, cl);
 	continue_at(cl, cached_dev_write_complete, NULL);
 }
 
@@ -1418,9 +1411,9 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 		s->writeback	= true;
 		s->cache_bio	= bio;
 
-		closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+		closure_call(&s->btree, bch_data_insert, NULL, cl);
 	} else {
-		closure_call(&s->op.cl, cache_lookup, NULL, cl);
+		closure_call(&s->btree, cache_lookup, NULL, cl);
 	}
 
 	continue_at(cl, search_free, NULL);
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 0f79177c4f33..ed578aa53ee2 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -6,6 +6,7 @@
 struct search {
 	/* Stack frame for bio_complete */
 	struct closure		cl;
+	struct closure		btree;
 
 	struct bcache_device	*d;
 	struct cache_set	*c;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index b58c2bc91e3f..d0968e8938f7 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -143,7 +143,7 @@ static void write_dirty_finish(struct closure *cl)
 	struct btree_op op;
 	struct keylist keys;
 
-	bch_btree_op_init_stack(&op);
+	bch_btree_op_init(&op, -1);
 	bch_keylist_init(&keys);
 	op.type = BTREE_REPLACE;
 
@@ -156,7 +156,6 @@ static void write_dirty_finish(struct closure *cl)
 			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
 
 	bch_btree_insert(&op, dc->disk.c, &keys, NULL);
-	closure_sync(&op.cl);
 
 	if (op.insert_collision)
 		trace_bcache_writeback_collision(&w->key);
@@ -457,7 +456,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
 {
 	struct sectors_dirty_init op;
 
-	bch_btree_op_init_stack(&op.op);
+	bch_btree_op_init(&op.op, -1);
 	op.inode = dc->disk.id;
 
 	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
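
As a rough sketch (not part of the patch itself): with op->cl gone, any caller that needs a synchronous btree node write supplies its own on-stack closure, mirroring the hunks in btree_split() and bch_btree_insert_node() above. The function name below is hypothetical:

/*
 * Illustrative only -- not from the patch.  'b' is assumed to be a
 * btree node the caller already holds locked.
 */
static void example_sync_node_write(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);
	bch_btree_node_write(b, &cl);	/* start the write, waiting on cl */
	closure_sync(&cl);		/* block until the write completes */
}

Read-side walkers, by contrast, now just call bch_btree_op_init(&op, -1) (or pass SHRT_MAX to take write locks from the root down, as gc, bch_btree_check() and journal replay do above) and no longer have a closure to sync.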