Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r-- | drivers/md/bcache/request.c | 1116
1 file changed, 541 insertions(+), 575 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 786a1a4f74d8..fbcc851ed5a5 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -25,7 +25,7 @@
 
 struct kmem_cache *bch_search_cache;
 
-static void check_should_skip(struct cached_dev *, struct search *);
+static void bch_data_insert_start(struct closure *);
 
 /* Cgroup interface */
 
@@ -213,221 +213,79 @@ static void bio_csum(struct bio *bio, struct bkey *k)
 
 /* Insert data into cache */
 
-static void bio_invalidate(struct closure *cl)
+static void bch_data_insert_keys(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct bio *bio = op->cache_bio;
-
-	pr_debug("invalidating %i sectors from %llu",
-		 bio_sectors(bio), (uint64_t) bio->bi_sector);
-
-	while (bio_sectors(bio)) {
-		unsigned len = min(bio_sectors(bio), 1U << 14);
-
-		if (bch_keylist_realloc(&op->keys, 0, op->c))
-			goto out;
-
-		bio->bi_sector	+= len;
-		bio->bi_size	-= len << 9;
-
-		bch_keylist_add(&op->keys,
-				&KEY(op->inode, bio->bi_sector, len));
-	}
-
-	op->insert_data_done = true;
-	bio_put(bio);
-out:
-	continue_at(cl, bch_journal, bcache_wq);
-}
-
-struct open_bucket {
-	struct list_head	list;
-	struct task_struct	*last;
-	unsigned		sectors_free;
-	BKEY_PADDED(key);
-};
-
-void bch_open_buckets_free(struct cache_set *c)
-{
-	struct open_bucket *b;
-
-	while (!list_empty(&c->data_buckets)) {
-		b = list_first_entry(&c->data_buckets,
-				     struct open_bucket, list);
-		list_del(&b->list);
-		kfree(b);
-	}
-}
-
-int bch_open_buckets_alloc(struct cache_set *c)
-{
-	int i;
-
-	spin_lock_init(&c->data_bucket_lock);
-
-	for (i = 0; i < 6; i++) {
-		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
-		if (!b)
-			return -ENOMEM;
-
-		list_add(&b->list, &c->data_buckets);
-	}
-
-	return 0;
-}
-
-/*
- * We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
- *
- * The idea is if you've got multiple tasks pulling data into the cache at the
- * same time, you'll get better cache utilization if you try to segregate their
- * data and preserve locality.
- *
- * For example, say you're starting Firefox at the same time you're copying a
- * bunch of files. Firefox will likely end up being fairly hot and stay in the
- * cache a while, but the data you copied might not be; if you wrote all that
- * data to the same buckets it'd get invalidated at the same time.
- *
- * Both of those tasks will be doing fairly random IO so we can't rely on
- * detecting sequential IO to segregate their data, but going off of the task
- * should be a sane heuristic.
- */
-static struct open_bucket *pick_data_bucket(struct cache_set *c,
-					    const struct bkey *search,
-					    struct task_struct *task,
-					    struct bkey *alloc)
-{
-	struct open_bucket *ret, *ret_task = NULL;
-
-	list_for_each_entry_reverse(ret, &c->data_buckets, list)
-		if (!bkey_cmp(&ret->key, search))
-			goto found;
-		else if (ret->last == task)
-			ret_task = ret;
-
-	ret = ret_task ?: list_first_entry(&c->data_buckets,
-					   struct open_bucket, list);
-found:
-	if (!ret->sectors_free && KEY_PTRS(alloc)) {
-		ret->sectors_free = c->sb.bucket_size;
-		bkey_copy(&ret->key, alloc);
-		bkey_init(alloc);
-	}
-
-	if (!ret->sectors_free)
-		ret = NULL;
-
-	return ret;
-}
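Editor's note - the following sketch is illustrative and not part of the patch. The removed pick_data_bucket() encodes the priority order described in the comment above it: first a bucket whose last write ends exactly where this write begins, then a bucket last used by the same task, then the least recently used bucket. A self-contained model of that decision, with hypothetical names:

#include <stddef.h>

struct model_bucket {
	unsigned long long	next_offset;	/* sector where a sequential write would continue */
	const void		*last_task;	/* opaque identity of the last writer */
};

/* b[0] is the least recently used slot, mirroring the list head above */
static struct model_bucket *model_pick(struct model_bucket *b, size_t n,
				       unsigned long long offset,
				       const void *task)
{
	struct model_bucket *same_task = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (b[i].next_offset == offset)
			return &b[i];		/* sequential with an open stream */
		if (b[i].last_task == task && !same_task)
			same_task = &b[i];	/* remember a task-affine fallback */
	}

	return same_task ? same_task : &b[0];	/* last resort: reuse the LRU slot */
}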
-
-/*
- * Allocates some space in the cache to write to, and k to point to the newly
- * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
- * end of the newly allocated space).
- *
- * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
- * sectors were actually allocated.
- *
- * If s->writeback is true, will not fail.
- */
-static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
-			      struct search *s)
-{
-	struct cache_set *c = s->op.c;
-	struct open_bucket *b;
-	BKEY_PADDED(key) alloc;
-	struct closure cl, *w = NULL;
-	unsigned i;
-
-	if (s->writeback) {
-		closure_init_stack(&cl);
-		w = &cl;
-	}
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+	atomic_t *journal_ref = NULL;
+	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
+	int ret;
 
 	/*
-	 * We might have to allocate a new bucket, which we can't do with a
-	 * spinlock held. So if we have to allocate, we drop the lock, allocate
-	 * and then retry. KEY_PTRS() indicates whether alloc points to
-	 * allocated bucket(s).
+	 * If we're looping, might already be waiting on
+	 * another journal write - can't wait on more than one journal write at
+	 * a time
+	 *
+	 * XXX: this looks wrong
 	 */
+#if 0
+	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
+		closure_sync(&s->cl);
+#endif
 
-	bkey_init(&alloc.key);
-	spin_lock(&c->data_bucket_lock);
-
-	while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
-		unsigned watermark = s->op.write_prio
-			? WATERMARK_MOVINGGC
-			: WATERMARK_NONE;
-
-		spin_unlock(&c->data_bucket_lock);
-
-		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
-			return false;
+	if (!op->replace)
+		journal_ref = bch_journal(op->c, &op->insert_keys,
+					  op->flush_journal ? cl : NULL);
 
-		spin_lock(&c->data_bucket_lock);
+	ret = bch_btree_insert(op->c, &op->insert_keys,
+			       journal_ref, replace_key);
+	if (ret == -ESRCH) {
+		op->replace_collision = true;
+	} else if (ret) {
+		op->error		= -ENOMEM;
+		op->insert_data_done	= true;
 	}
 
-	/*
-	 * If we had to allocate, we might race and not need to allocate the
-	 * second time we call find_data_bucket(). If we allocated a bucket but
-	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
-	 */
-	if (KEY_PTRS(&alloc.key))
-		__bkey_put(c, &alloc.key);
-
-	for (i = 0; i < KEY_PTRS(&b->key); i++)
-		EBUG_ON(ptr_stale(c, &b->key, i));
+	if (journal_ref)
+		atomic_dec_bug(journal_ref);
 
-	/* Set up the pointer to the space we're allocating: */
+	if (!op->insert_data_done)
+		continue_at(cl, bch_data_insert_start, bcache_wq);
 
-	for (i = 0; i < KEY_PTRS(&b->key); i++)
-		k->ptr[i] = b->key.ptr[i];
+	bch_keylist_free(&op->insert_keys);
+	closure_return(cl);
+}
 
-	sectors = min(sectors, b->sectors_free);
+static void bch_data_invalidate(struct closure *cl)
+{
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+	struct bio *bio = op->bio;
 
-	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
-	SET_KEY_SIZE(k, sectors);
-	SET_KEY_PTRS(k, KEY_PTRS(&b->key));
+	pr_debug("invalidating %i sectors from %llu",
+		 bio_sectors(bio), (uint64_t) bio->bi_sector);
 
-	/*
-	 * Move b to the end of the lru, and keep track of what this bucket was
-	 * last used for:
-	 */
-	list_move_tail(&b->list, &c->data_buckets);
-	bkey_copy_key(&b->key, k);
-	b->last = s->task;
+	while (bio_sectors(bio)) {
+		unsigned sectors = min(bio_sectors(bio),
+				       1U << (KEY_SIZE_BITS - 1));
 
-	b->sectors_free	-= sectors;
+		if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
+			goto out;
 
-	for (i = 0; i < KEY_PTRS(&b->key); i++) {
-		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+		bio->bi_sector	+= sectors;
+		bio->bi_size	-= sectors << 9;
 
-		atomic_long_add(sectors,
-				&PTR_CACHE(c, &b->key, i)->sectors_written);
-	}
+		bch_keylist_add(&op->insert_keys,
+				&KEY(op->inode, bio->bi_sector, sectors));
	}
 
-	if (b->sectors_free < c->sb.block_size)
-		b->sectors_free = 0;
-
-	/*
-	 * k takes refcounts on the buckets it points to until it's inserted
-	 * into the btree, but if we're done with this bucket we just transfer
-	 * get_data_bucket()'s refcount.
-	 */
-	if (b->sectors_free)
-		for (i = 0; i < KEY_PTRS(&b->key); i++)
-			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
-
-	spin_unlock(&c->data_bucket_lock);
-	return true;
+	op->insert_data_done = true;
+	bio_put(bio);
+out:
+	continue_at(cl, bch_data_insert_keys, bcache_wq);
 }
 
-static void bch_insert_data_error(struct closure *cl)
+static void bch_data_insert_error(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
 	/*
 	 * Our data write just errored, which means we've got a bunch of keys to
@@ -438,35 +296,34 @@ static void bch_insert_data_error(struct closure *cl)
 	 * from the keys we'll accomplish just that.
 	 */
 
-	struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;
+	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
 
-	while (src != op->keys.top) {
+	while (src != op->insert_keys.top) {
 		struct bkey *n = bkey_next(src);
 
 		SET_KEY_PTRS(src, 0);
-		bkey_copy(dst, src);
+		memmove(dst, src, bkey_bytes(src));
 
 		dst = bkey_next(dst);
 		src = n;
 	}
 
-	op->keys.top = dst;
+	op->insert_keys.top = dst;
 
-	bch_journal(cl);
+	bch_data_insert_keys(cl);
 }
 
-static void bch_insert_data_endio(struct bio *bio, int error)
+static void bch_data_insert_endio(struct bio *bio, int error)
 {
 	struct closure *cl = bio->bi_private;
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
 	if (error) {
 		/* TODO: We could try to recover from this. */
-		if (s->writeback)
-			s->error = error;
-		else if (s->write)
-			set_closure_fn(cl, bch_insert_data_error, bcache_wq);
+		if (op->writeback)
+			op->error = error;
+		else if (!op->replace)
+			set_closure_fn(cl, bch_data_insert_error, bcache_wq);
 		else
 			set_closure_fn(cl, NULL, NULL);
 	}
@@ -474,18 +331,17 @@ static void bch_insert_data_endio(struct bio *bio, int error)
 	bch_bbio_endio(op->c, bio, error, "writing data to cache");
 }
 
-static void bch_insert_data_loop(struct closure *cl)
+static void bch_data_insert_start(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
-	struct bio *bio = op->cache_bio, *n;
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+	struct bio *bio = op->bio, *n;
 
-	if (op->skip)
-		return bio_invalidate(cl);
+	if (op->bypass)
+		return bch_data_invalidate(cl);
 
 	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
 		set_gc_sectors(op->c);
-		bch_queue_gc(op->c);
+		wake_up_gc(op->c);
 	}
 
 	/*
@@ -497,29 +353,30 @@ static void bch_insert_data_loop(struct closure *cl)
 	do {
 		unsigned i;
 		struct bkey *k;
-		struct bio_set *split = s->d
-			? s->d->bio_split : op->c->bio_split;
+		struct bio_set *split = op->c->bio_split;
 
 		/* 1 for the device pointer and 1 for the chksum */
-		if (bch_keylist_realloc(&op->keys,
+		if (bch_keylist_realloc(&op->insert_keys,
 					1 + (op->csum ? 1 : 0),
 					op->c))
-			continue_at(cl, bch_journal, bcache_wq);
+			continue_at(cl, bch_data_insert_keys, bcache_wq);
 
-		k = op->keys.top;
+		k = op->insert_keys.top;
 		bkey_init(k);
 		SET_KEY_INODE(k, op->inode);
 		SET_KEY_OFFSET(k, bio->bi_sector);
 
-		if (!bch_alloc_sectors(k, bio_sectors(bio), s))
+		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
+				       op->write_point, op->write_prio,
+				       op->writeback))
 			goto err;
 
 		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 
-		n->bi_end_io	= bch_insert_data_endio;
+		n->bi_end_io	= bch_data_insert_endio;
 		n->bi_private	= cl;
 
-		if (s->writeback) {
+		if (op->writeback) {
 			SET_KEY_DIRTY(k, true);
 
 			for (i = 0; i < KEY_PTRS(k); i++)
@@ -532,17 +389,17 @@ static void bch_insert_data_loop(struct closure *cl)
 		bio_csum(n, k);
 
 		trace_bcache_cache_insert(k);
-		bch_keylist_push(&op->keys);
+		bch_keylist_push(&op->insert_keys);
 
 		n->bi_rw |= REQ_WRITE;
 		bch_submit_bbio(n, op->c, k, 0);
 	} while (n != bio);
 
 	op->insert_data_done = true;
-	continue_at(cl, bch_journal, bcache_wq);
+	continue_at(cl, bch_data_insert_keys, bcache_wq);
 err:
 	/* bch_alloc_sectors() blocks if s->writeback = true */
-	BUG_ON(s->writeback);
+	BUG_ON(op->writeback);
 
 	/*
 	 * But if it's not a writeback write we'd rather just bail out if
@@ -550,15 +407,15 @@ err:
 	 * we might be starving btree writes for gc or something.
 	 */
 
-	if (s->write) {
+	if (!op->replace) {
 		/*
 		 * Writethrough write: We can't complete the write until we've
 		 * updated the index. But we don't want to delay the write while
 		 * we wait for buckets to be freed up, so just invalidate the
 		 * rest of the write.
 		 */
-		op->skip = true;
-		return bio_invalidate(cl);
+		op->bypass = true;
+		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
@@ -567,15 +424,15 @@ err:
		 */
		op->insert_data_done = true;
		bio_put(bio);
 
-		if (!bch_keylist_empty(&op->keys))
-			continue_at(cl, bch_journal, bcache_wq);
+		if (!bch_keylist_empty(&op->insert_keys))
+			continue_at(cl, bch_data_insert_keys, bcache_wq);
		else
			closure_return(cl);
	}
 }
 
 /**
- * bch_insert_data - stick some data in the cache
+ * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 *
@@ -587,56 +444,179 @@ err:
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
- * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
+ * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
- * If op->skip is true, instead of inserting the data it invalidates the region
- * of the cache represented by op->cache_bio and op->inode.
+ * If s->bypass is true, instead of inserting the data it invalidates the
+ * region of the cache represented by s->cache_bio and op->inode.
 */
-void bch_insert_data(struct closure *cl)
+void bch_data_insert(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+
+	trace_bcache_write(op->bio, op->writeback, op->bypass);
 
-	bch_keylist_init(&op->keys);
-	bio_get(op->cache_bio);
-	bch_insert_data_loop(cl);
+	bch_keylist_init(&op->insert_keys);
+	bio_get(op->bio);
+	bch_data_insert_start(cl);
 }
 
-void bch_btree_insert_async(struct closure *cl)
+/* Congested? */
+
+unsigned bch_get_congested(struct cache_set *c)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	int i;
+	long rand;
 
-	if (bch_btree_insert(op, op->c)) {
-		s->error = -ENOMEM;
-		op->insert_data_done = true;
-	}
+	if (!c->congested_read_threshold_us &&
+	    !c->congested_write_threshold_us)
+		return 0;
+
+	i = (local_clock_us() - c->congested_last_us) / 1024;
+	if (i < 0)
+		return 0;
+
+	i += atomic_read(&c->congested);
+	if (i >= 0)
+		return 0;
+
+	i += CONGESTED_MAX;
+
+	if (i > 0)
+		i = fract_exp_two(i, 6);
 
-	if (op->insert_data_done) {
-		bch_keylist_free(&op->keys);
-		closure_return(cl);
-	} else
-		continue_at(cl, bch_insert_data_loop, bcache_wq);
+	rand = get_random_int();
+	i -= bitmap_weight(&rand, BITS_PER_LONG);
+
+	return i > 0 ? i : 1;
 }
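Editor's note - a sketch of the arithmetic assumed above, not part of the patch. bch_get_congested() turns the (negative) congestion counter into a small throttle value: with a fractional width of 6 bits the fract_exp_two() result grows roughly like 2^(i/64), and subtracting bitmap_weight() of a random word (about BITS_PER_LONG/2 set bits on average) dithers the threshold. A plausible model of fract_exp_two(), assuming the usual linear interpolation between powers of two:

/* model only: treat the low fract_bits of x as a fractional exponent */
static unsigned model_fract_exp_two(unsigned x, unsigned fract_bits)
{
	unsigned fract = x & ((1U << fract_bits) - 1);

	x >>= fract_bits;		/* integer part of the exponent */
	x = 1U << x;			/* 2^integer */
	x += (x * fract) >> fract_bits;	/* interpolate toward 2^(integer + 1) */

	return x;
}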
 
-/* Common code for the make_request functions */
+static void add_sequential(struct task_struct *t)
+{
+	ewma_add(t->sequential_io_avg,
+		 t->sequential_io, 8, 0);
 
-static void request_endio(struct bio *bio, int error)
+	t->sequential_io = 0;
+}
+
+static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
 {
-	struct closure *cl = bio->bi_private;
+	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
+}
 
-	if (error) {
-		struct search *s = container_of(cl, struct search, cl);
-		s->error = error;
-		/* Only cache read errors are recoverable */
-		s->recoverable = false;
+static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
+{
+	struct cache_set *c = dc->disk.c;
+	unsigned mode = cache_mode(dc, bio);
+	unsigned sectors, congested = bch_get_congested(c);
+	struct task_struct *task = current;
+	struct io *i;
+
+	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
+	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
+	    (bio->bi_rw & REQ_DISCARD))
+		goto skip;
+
+	if (mode == CACHE_MODE_NONE ||
+	    (mode == CACHE_MODE_WRITEAROUND &&
+	     (bio->bi_rw & REQ_WRITE)))
+		goto skip;
+
+	if (bio->bi_sector & (c->sb.block_size - 1) ||
+	    bio_sectors(bio) & (c->sb.block_size - 1)) {
+		pr_debug("skipping unaligned io");
+		goto skip;
	}
 
-	bio_put(bio);
-	closure_put(cl);
+	if (bypass_torture_test(dc)) {
+		if ((get_random_int() & 3) == 3)
+			goto skip;
+		else
+			goto rescale;
+	}
+
+	if (!congested && !dc->sequential_cutoff)
+		goto rescale;
+
+	if (!congested &&
+	    mode == CACHE_MODE_WRITEBACK &&
+	    (bio->bi_rw & REQ_WRITE) &&
+	    (bio->bi_rw & REQ_SYNC))
+		goto rescale;
+
+	spin_lock(&dc->io_lock);
+
+	hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
+		if (i->last == bio->bi_sector &&
+		    time_before(jiffies, i->jiffies))
+			goto found;
+
+	i = list_first_entry(&dc->io_lru, struct io, lru);
+
+	add_sequential(task);
+	i->sequential = 0;
+found:
+	if (i->sequential + bio->bi_size > i->sequential)
+		i->sequential	+= bio->bi_size;
+
+	i->last			 = bio_end_sector(bio);
+	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
+	task->sequential_io	 = i->sequential;
+
+	hlist_del(&i->hash);
+	hlist_add_head(&i->hash, iohash(dc, i->last));
+	list_move_tail(&i->lru, &dc->io_lru);
+
+	spin_unlock(&dc->io_lock);
+
+	sectors = max(task->sequential_io,
+		      task->sequential_io_avg) >> 9;
+
+	if (dc->sequential_cutoff &&
+	    sectors >= dc->sequential_cutoff >> 9) {
+		trace_bcache_bypass_sequential(bio);
+		goto skip;
+	}
+
+	if (congested && sectors >= congested) {
+		trace_bcache_bypass_congested(bio);
+		goto skip;
+	}
+
+rescale:
+	bch_rescale_priorities(c, bio_sectors(bio));
+	return false;
+skip:
+	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
+	return true;
 }
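Editor's note - an illustrative model, not part of the patch. check_should_bypass() above rates a task as sequential by keeping a per-task byte counter plus a decayed average; add_sequential() folds the finished run into the average with weight 1/8, which is what ewma_add(..., 8, 0) does. A minimal version of that update rule, with hypothetical types:

struct model_task {
	long	sequential_io;		/* bytes in the current run */
	long	sequential_io_avg;	/* exponentially weighted average of past runs */
};

static void model_add_sequential(struct model_task *t)
{
	/* avg += (run - avg) / 8, i.e. a 1/8-weight EWMA; then start a new run */
	t->sequential_io_avg += (t->sequential_io - t->sequential_io_avg) / 8;
	t->sequential_io = 0;
}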
 
-void bch_cache_read_endio(struct bio *bio, int error)
+/* Cache lookup */
+
+struct search {
+	/* Stack frame for bio_complete */
+	struct closure		cl;
+
+	struct bcache_device	*d;
+
+	struct bbio		bio;
+	struct bio		*orig_bio;
+	struct bio		*cache_miss;
+
+	unsigned		insert_bio_sectors;
+
+	unsigned		recoverable:1;
+	unsigned		unaligned_bvec:1;
+	unsigned		write:1;
+	unsigned		read_dirty_data:1;
+
+	unsigned long		start_time;
+
+	struct btree_op		op;
+	struct data_insert_op	iop;
+};
+
+static void bch_cache_read_endio(struct bio *bio, int error)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct closure *cl = bio->bi_private;
@@ -650,13 +630,113 @@ void bch_cache_read_endio(struct bio *bio, int error)
 	 */
 
 	if (error)
-		s->error = error;
-	else if (ptr_stale(s->op.c, &b->key, 0)) {
-		atomic_long_inc(&s->op.c->cache_read_races);
-		s->error = -EINTR;
+		s->iop.error = error;
+	else if (ptr_stale(s->iop.c, &b->key, 0)) {
+		atomic_long_inc(&s->iop.c->cache_read_races);
+		s->iop.error = -EINTR;
 	}
 
-	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
+	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
+}
+
+/*
+ * Read from a single key, handling the initial cache miss if the key starts in
+ * the middle of the bio
+ */
+static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
+{
+	struct search *s = container_of(op, struct search, op);
+	struct bio *n, *bio = &s->bio.bio;
+	struct bkey *bio_key;
+	unsigned ptr;
+
+	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+		return MAP_CONTINUE;
+
+	if (KEY_INODE(k) != s->iop.inode ||
+	    KEY_START(k) > bio->bi_sector) {
+		unsigned bio_sectors = bio_sectors(bio);
+		unsigned sectors = KEY_INODE(k) == s->iop.inode
+			? min_t(uint64_t, INT_MAX,
+				KEY_START(k) - bio->bi_sector)
+			: INT_MAX;
+
+		int ret = s->d->cache_miss(b, s, bio, sectors);
+		if (ret != MAP_CONTINUE)
+			return ret;
+
+		/* if this was a complete miss we shouldn't get here */
+		BUG_ON(bio_sectors <= sectors);
+	}
+
+	if (!KEY_SIZE(k))
+		return MAP_CONTINUE;
+
+	/* XXX: figure out best pointer - for multiple cache devices */
+	ptr = 0;
+
+	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
+
+	if (KEY_DIRTY(k))
+		s->read_dirty_data = true;
+
+	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
+				     KEY_OFFSET(k) - bio->bi_sector),
+			  GFP_NOIO, s->d->bio_split);
+
+	bio_key = &container_of(n, struct bbio, bio)->key;
+	bch_bkey_copy_single_ptr(bio_key, k, ptr);
+
+	bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
+
+	n->bi_end_io	= bch_cache_read_endio;
+	n->bi_private	= &s->cl;
+
+	/*
+	 * The bucket we're reading from might be reused while our bio
+	 * is in flight, and we could then end up reading the wrong
+	 * data.
+	 *
+	 * We guard against this by checking (in cache_read_endio()) if
+	 * the pointer is stale again; if so, we treat it as an error
+	 * and reread from the backing device (but we don't pass that
+	 * error up anywhere).
+	 */
+
+	__bch_submit_bbio(n, b->c);
+	return n == bio ? MAP_DONE : MAP_CONTINUE;
+}
+
+static void cache_lookup(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, iop.cl);
+	struct bio *bio = &s->bio.bio;
+
+	int ret = bch_btree_map_keys(&s->op, s->iop.c,
+				     &KEY(s->iop.inode, bio->bi_sector, 0),
+				     cache_lookup_fn, MAP_END_KEY);
+	if (ret == -EAGAIN)
+		continue_at(cl, cache_lookup, bcache_wq);
+
+	closure_return(cl);
+}
+
+/* Common code for the make_request functions */
+
+static void request_endio(struct bio *bio, int error)
+{
+	struct closure *cl = bio->bi_private;
+
+	if (error) {
+		struct search *s = container_of(cl, struct search, cl);
+		s->iop.error = error;
+		/* Only cache read errors are recoverable */
+		s->recoverable = false;
+	}
+
+	bio_put(bio);
+	closure_put(cl);
 }
 
 static void bio_complete(struct search *s)
@@ -670,8 +750,8 @@ static void bio_complete(struct search *s)
 		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
 		part_stat_unlock();
 
-		trace_bcache_request_end(s, s->orig_bio);
-		bio_endio(s->orig_bio, s->error);
+		trace_bcache_request_end(s->d, s->orig_bio);
+		bio_endio(s->orig_bio, s->iop.error);
 		s->orig_bio = NULL;
 	}
 }
@@ -691,8 +771,8 @@ static void search_free(struct closure *cl)
 	struct search *s = container_of(cl, struct search, cl);
 	bio_complete(s);
 
-	if (s->op.cache_bio)
-		bio_put(s->op.cache_bio);
+	if (s->iop.bio)
+		bio_put(s->iop.bio);
 
 	if (s->unaligned_bvec)
 		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
@@ -703,21 +783,22 @@ static void search_free(struct closure *cl)
 
 static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 {
+	struct search *s;
 	struct bio_vec *bv;
-	struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
-	memset(s, 0, offsetof(struct search, op.keys));
+
+	s = mempool_alloc(d->c->search, GFP_NOIO);
+	memset(s, 0, offsetof(struct search, iop.insert_keys));
 
 	__closure_init(&s->cl, NULL);
 
-	s->op.inode		= d->id;
-	s->op.c			= d->c;
+	s->iop.inode		= d->id;
+	s->iop.c		= d->c;
 	s->d			= d;
 	s->op.lock		= -1;
-	s->task			= current;
+	s->iop.write_point	= hash_long((unsigned long) current, 16);
 	s->orig_bio		= bio;
 	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
-	s->op.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
-	s->op.skip		= (bio->bi_rw & REQ_DISCARD) != 0;
+	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
 	s->recoverable		= 1;
 	s->start_time		= jiffies;
 	do_bio_hook(s);
@@ -734,18 +815,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 	return s;
 }
 
-static void btree_read_async(struct closure *cl)
-{
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-
-	int ret = btree_root(search_recurse, op->c, op);
-
-	if (ret == -EAGAIN)
-		continue_at(cl, btree_read_async, bcache_wq);
-
-	closure_return(cl);
-}
-
 /* Cached devices */
 
 static void cached_dev_bio_complete(struct closure *cl)
@@ -759,27 +828,28 @@ static void cached_dev_bio_complete(struct closure *cl)
 
 /* Process reads */
 
-static void cached_dev_read_complete(struct closure *cl)
+static void cached_dev_cache_miss_done(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 
-	if (s->op.insert_collision)
-		bch_mark_cache_miss_collision(s);
+	if (s->iop.replace_collision)
+		bch_mark_cache_miss_collision(s->iop.c, s->d);
 
-	if (s->op.cache_bio) {
+	if (s->iop.bio) {
 		int i;
 		struct bio_vec *bv;
 
-		__bio_for_each_segment(bv, s->op.cache_bio, i, 0)
+		bio_for_each_segment_all(bv, s->iop.bio, i)
 			__free_page(bv->bv_page);
 	}
 
 	cached_dev_bio_complete(cl);
 }
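Editor's note - a schematic of the control flow used throughout this file, not part of the patch. cache_lookup() never blocks: on -EAGAIN it re-arms itself with continue_at(), which records the next step and returns, and closure_return() ends the chain. The stand-ins below model that trampoline; in bcache the rescheduling is done by a workqueue rather than this loop:

#include <stddef.h>

struct model_closure;
typedef void (*model_step_fn)(struct model_closure *cl);

struct model_closure {
	model_step_fn	next;	/* step armed by model_continue_at(), if any */
	int		tries;	/* demo state carried between steps */
};

static void model_continue_at(struct model_closure *cl, model_step_fn fn)
{
	cl->next = fn;	/* a real closure queues fn on a workqueue and returns */
}

static void model_lookup(struct model_closure *cl)
{
	/* pretend the btree walk asks for a retry on the first two calls */
	if (cl->tries++ < 2) {
		model_continue_at(cl, model_lookup);
		return;
	}
	cl->next = NULL;	/* closure_return(): chain finished */
}

/* the "workqueue": keep running whatever step is currently armed */
static void model_run(struct model_closure *cl, model_step_fn first)
{
	cl->next = first;
	while (cl->next) {
		model_step_fn fn = cl->next;

		cl->next = NULL;
		fn(cl);
	}
}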
-static void request_read_error(struct closure *cl)
+static void cached_dev_read_error(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
+	struct bio *bio = &s->bio.bio;
 	struct bio_vec *bv;
 	int i;
 
@@ -787,7 +857,7 @@ static void request_read_error(struct closure *cl)
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);
 
-		s->error = 0;
+		s->iop.error = 0;
 		bv = s->bio.bio.bi_io_vec;
 		do_bio_hook(s);
 		s->bio.bio.bi_io_vec = bv;
@@ -803,146 +873,148 @@ static void request_read_error(struct closure *cl)
 
 		/* XXX: invalidate cache */
 
-		closure_bio_submit(&s->bio.bio, &s->cl, s->d);
+		closure_bio_submit(bio, cl, s->d);
 	}
 
-	continue_at(cl, cached_dev_read_complete, NULL);
+	continue_at(cl, cached_dev_cache_miss_done, NULL);
 }
 
-static void request_read_done(struct closure *cl)
+static void cached_dev_read_done(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
 	/*
-	 * s->cache_bio != NULL implies that we had a cache miss; cache_bio now
-	 * contains data ready to be inserted into the cache.
+	 * We had a cache miss; cache_bio now contains data ready to be inserted
+	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */
 
-	if (s->op.cache_bio) {
-		bio_reset(s->op.cache_bio);
-		s->op.cache_bio->bi_sector = s->cache_miss->bi_sector;
-		s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev;
-		s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
-		bch_bio_map(s->op.cache_bio, NULL);
+	if (s->iop.bio) {
+		bio_reset(s->iop.bio);
+		s->iop.bio->bi_sector	= s->cache_miss->bi_sector;
+		s->iop.bio->bi_bdev	= s->cache_miss->bi_bdev;
+		s->iop.bio->bi_size	= s->insert_bio_sectors << 9;
+		bch_bio_map(s->iop.bio, NULL);
 
-		bio_copy_data(s->cache_miss, s->op.cache_bio);
+		bio_copy_data(s->cache_miss, s->iop.bio);
 
 		bio_put(s->cache_miss);
 		s->cache_miss = NULL;
 	}
 
-	if (verify(dc, &s->bio.bio) && s->recoverable)
-		bch_data_verify(s);
+	if (verify(dc, &s->bio.bio) && s->recoverable &&
+	    !s->unaligned_bvec && !s->read_dirty_data)
+		bch_data_verify(dc, s->orig_bio);
 
 	bio_complete(s);
 
-	if (s->op.cache_bio &&
-	    !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
-		s->op.type = BTREE_REPLACE;
-		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+	if (s->iop.bio &&
+	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
+		BUG_ON(!s->iop.replace);
+		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}
 
-	continue_at(cl, cached_dev_read_complete, NULL);
+	continue_at(cl, cached_dev_cache_miss_done, NULL);
 }
 
-static void request_read_done_bh(struct closure *cl)
+static void cached_dev_read_done_bh(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
-	bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
-	trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);
+	bch_mark_cache_accounting(s->iop.c, s->d,
+				  !s->cache_miss, s->iop.bypass);
+	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
 
-	if (s->error)
-		continue_at_nobarrier(cl, request_read_error, bcache_wq);
-	else if (s->op.cache_bio || verify(dc, &s->bio.bio))
-		continue_at_nobarrier(cl, request_read_done, bcache_wq);
+	if (s->iop.error)
+		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
+	else if (s->iop.bio || verify(dc, &s->bio.bio))
+		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
 	else
-		continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
+		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
 }
 
 static int cached_dev_cache_miss(struct btree *b, struct search *s,
 				 struct bio *bio, unsigned sectors)
 {
-	int ret = 0;
-	unsigned reada;
+	int ret = MAP_CONTINUE;
+	unsigned reada = 0;
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
-	struct bio *miss;
+	struct bio *miss, *cache_bio;
 
-	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-	if (miss == bio)
-		s->op.lookup_done = true;
+	if (s->cache_miss || s->iop.bypass) {
+		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
+		goto out_submit;
+	}
 
-	miss->bi_end_io		= request_endio;
-	miss->bi_private	= &s->cl;
+	if (!(bio->bi_rw & REQ_RAHEAD) &&
+	    !(bio->bi_rw & REQ_META) &&
+	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
+		reada = min_t(sector_t, dc->readahead >> 9,
+			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
 
-	if (s->cache_miss || s->op.skip)
-		goto out_submit;
+	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
-	if (miss != bio ||
-	    (bio->bi_rw & REQ_RAHEAD) ||
-	    (bio->bi_rw & REQ_META) ||
-	    s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
-		reada = 0;
-	else {
-		reada = min(dc->readahead >> 9,
-			    sectors - bio_sectors(miss));
-
-		if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
-			reada = bdev_sectors(miss->bi_bdev) -
-				bio_end_sector(miss);
-	}
+	s->iop.replace_key = KEY(s->iop.inode,
+				 bio->bi_sector + s->insert_bio_sectors,
+				 s->insert_bio_sectors);
 
-	s->cache_bio_sectors = bio_sectors(miss) + reada;
-	s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
-			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
-			dc->disk.bio_split);
+	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
+	if (ret)
+		return ret;
+
+	s->iop.replace = true;
+
+	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 
-	if (!s->op.cache_bio)
-		goto out_submit;
+	/* btree_search_recurse()'s btree iterator is no good anymore */
+	ret = miss == bio ? MAP_DONE : -EINTR;
 
-	s->op.cache_bio->bi_sector	= miss->bi_sector;
-	s->op.cache_bio->bi_bdev	= miss->bi_bdev;
-	s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
+	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
+			dc->disk.bio_split);
+	if (!cache_bio)
+		goto out_submit;
 
-	s->op.cache_bio->bi_end_io	= request_endio;
-	s->op.cache_bio->bi_private	= &s->cl;
+	cache_bio->bi_sector	= miss->bi_sector;
+	cache_bio->bi_bdev	= miss->bi_bdev;
+	cache_bio->bi_size	= s->insert_bio_sectors << 9;
 
-	/* btree_search_recurse()'s btree iterator is no good anymore */
-	ret = -EINTR;
-	if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
-		goto out_put;
+	cache_bio->bi_end_io	= request_endio;
+	cache_bio->bi_private	= &s->cl;
 
-	bch_bio_map(s->op.cache_bio, NULL);
-	if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
+	bch_bio_map(cache_bio, NULL);
+	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
 		goto out_put;
 
-	s->cache_miss = miss;
-	bio_get(s->op.cache_bio);
+	if (reada)
+		bch_mark_cache_readahead(s->iop.c, s->d);
 
-	closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
+	s->cache_miss	= miss;
+	s->iop.bio	= cache_bio;
+	bio_get(cache_bio);
+	closure_bio_submit(cache_bio, &s->cl, s->d);
 
 	return ret;
 out_put:
-	bio_put(s->op.cache_bio);
-	s->op.cache_bio = NULL;
+	bio_put(cache_bio);
 out_submit:
+	miss->bi_end_io		= request_endio;
+	miss->bi_private	= &s->cl;
 	closure_bio_submit(miss, &s->cl, s->d);
 	return ret;
 }
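Editor's note - a sketch of the sizing logic above, not part of the patch. When a miss is eligible for readahead (no REQ_RAHEAD or REQ_META, cache not too full), cached_dev_cache_miss() widens the cache fill by up to dc->readahead sectors, clamped to the end of the backing device, and the total fill never exceeds the hole the btree lookup reported. The same arithmetic, self-contained and with hypothetical names:

#include <stdint.h>

static unsigned model_insert_sectors(unsigned miss_sectors,	/* hole reported by the lookup */
				     unsigned bio_sectors,	/* sectors still in the bio */
				     unsigned readahead,	/* dc->readahead >> 9 */
				     uint64_t bio_end,		/* bio_end_sector(bio) */
				     uint64_t dev_sectors)	/* bdev_sectors(bio->bi_bdev) */
{
	uint64_t to_dev_end = dev_sectors - bio_end;
	unsigned reada = readahead < to_dev_end ? readahead : (unsigned)to_dev_end;
	unsigned want = bio_sectors + reada;

	/* min(sectors, bio_sectors(bio) + reada), as in the patch */
	return miss_sectors < want ? miss_sectors : want;
}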
 
-static void request_read(struct cached_dev *dc, struct search *s)
+static void cached_dev_read(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 
-	check_should_skip(dc, s);
-	closure_call(&s->op.cl, btree_read_async, NULL, cl);
-
-	continue_at(cl, request_read_done_bh, NULL);
+	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
+	continue_at(cl, cached_dev_read_done_bh, NULL);
 }
 
 /* Process writes */
@@ -956,88 +1028,85 @@ static void cached_dev_write_complete(struct closure *cl)
 	cached_dev_bio_complete(cl);
 }
 
-static void request_write(struct cached_dev *dc, struct search *s)
+static void cached_dev_write(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 	struct bio *bio = &s->bio.bio;
-	struct bkey start, end;
-	start = KEY(dc->disk.id, bio->bi_sector, 0);
-	end = KEY(dc->disk.id, bio_end_sector(bio), 0);
+	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
-	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
+	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
 
-	check_should_skip(dc, s);
 	down_read_non_owner(&dc->writeback_lock);
-
 	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
-		s->op.skip = false;
-		s->writeback = true;
+		/*
+		 * We overlap with some dirty data undergoing background
+		 * writeback, force this write to writeback
+		 */
+		s->iop.bypass = false;
+		s->iop.writeback = true;
 	}
 
+	/*
+	 * Discards aren't _required_ to do anything, so skipping if
+	 * check_overlapping returned true is ok
+	 *
+	 * But check_overlapping drops dirty keys for which io hasn't started,
+	 * so we still want to call it.
+	 */
 	if (bio->bi_rw & REQ_DISCARD)
-		goto skip;
+		s->iop.bypass = true;
 
 	if (should_writeback(dc, s->orig_bio,
 			     cache_mode(dc, bio),
-			     s->op.skip)) {
-		s->op.skip = false;
-		s->writeback = true;
+			     s->iop.bypass)) {
+		s->iop.bypass = false;
+		s->iop.writeback = true;
	}
 
-	if (s->op.skip)
-		goto skip;
-
-	trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);
+	if (s->iop.bypass) {
+		s->iop.bio = s->orig_bio;
+		bio_get(s->iop.bio);
 
-	if (!s->writeback) {
-		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
-						   dc->disk.bio_split);
-
-		closure_bio_submit(bio, cl, s->d);
-	} else {
+		if (!(bio->bi_rw & REQ_DISCARD) ||
+		    blk_queue_discard(bdev_get_queue(dc->bdev)))
+			closure_bio_submit(bio, cl, s->d);
+	} else if (s->iop.writeback) {
 		bch_writeback_add(dc);
+		s->iop.bio = bio;
 
-		if (s->op.flush_journal) {
+		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
-			s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
-							   dc->disk.bio_split);
+			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
+							     dc->disk.bio_split);
 
-			bio->bi_size = 0;
-			bio->bi_vcnt = 0;
-			closure_bio_submit(bio, cl, s->d);
-		} else {
-			s->op.cache_bio = bio;
+			flush->bi_rw	= WRITE_FLUSH;
+			flush->bi_bdev	= bio->bi_bdev;
+			flush->bi_end_io = request_endio;
+			flush->bi_private = cl;
+
+			closure_bio_submit(flush, cl, s->d);
		}
-	}
-out:
-	closure_call(&s->op.cl, bch_insert_data, NULL, cl);
-	continue_at(cl, cached_dev_write_complete, NULL);
-skip:
-	s->op.skip = true;
-	s->op.cache_bio = s->orig_bio;
-	bio_get(s->op.cache_bio);
+	} else {
+		s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
+					      dc->disk.bio_split);
 
-	if ((bio->bi_rw & REQ_DISCARD) &&
-	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
-		goto out;
+		closure_bio_submit(bio, cl, s->d);
+	}
 
-	closure_bio_submit(bio, cl, s->d);
-	goto out;
+	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
+	continue_at(cl, cached_dev_write_complete, NULL);
 }
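Editor's note - a compact restatement of the dispatch above, not part of the patch. cached_dev_write() resolves every write to exactly one of three behaviours, in this order: an overlap with dirty data being written back forces writeback, a discard forces bypass, and should_writeback() may upgrade the result again (it also sees the current bypass value). A hypothetical helper making that precedence explicit:

enum model_action { MODEL_BYPASS, MODEL_WRITEBACK, MODEL_WRITETHROUGH };

static enum model_action model_classify_write(int bypass,	/* from check_should_bypass() */
					      int overlaps_dirty,
					      int is_discard,
					      int policy_writeback /* should_writeback() */)
{
	int writeback = 0;

	if (overlaps_dirty) {	/* dirty keys already in flight win */
		bypass = 0;
		writeback = 1;
	}
	if (is_discard)		/* discards may legally do nothing */
		bypass = 1;
	if (policy_writeback) {	/* cache-mode / dirty-ratio heuristics */
		bypass = 0;
		writeback = 1;
	}

	return bypass ? MODEL_BYPASS :
	       writeback ? MODEL_WRITEBACK : MODEL_WRITETHROUGH;
}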
 
-static void request_nodata(struct cached_dev *dc, struct search *s)
+static void cached_dev_nodata(struct closure *cl)
 {
-	struct closure *cl = &s->cl;
+	struct search *s = container_of(cl, struct search, cl);
 	struct bio *bio = &s->bio.bio;
 
-	if (bio->bi_rw & REQ_DISCARD) {
-		request_write(dc, s);
-		return;
-	}
-
-	if (s->op.flush_journal)
-		bch_journal_meta(s->op.c, cl);
+	if (s->iop.flush_journal)
+		bch_journal_meta(s->iop.c, cl);
 
+	/* If it's a flush, we send the flush to the backing device too */
 	closure_bio_submit(bio, cl, s->d);
 
 	continue_at(cl, cached_dev_bio_complete, NULL);
@@ -1045,134 +1114,6 @@ static void cached_dev_nodata(struct closure *cl)
 
 /* Cached devices - read & write stuff */
 
-unsigned bch_get_congested(struct cache_set *c)
-{
-	int i;
-	long rand;
-
-	if (!c->congested_read_threshold_us &&
-	    !c->congested_write_threshold_us)
-		return 0;
-
-	i = (local_clock_us() - c->congested_last_us) / 1024;
-	if (i < 0)
-		return 0;
-
-	i += atomic_read(&c->congested);
-	if (i >= 0)
-		return 0;
-
-	i += CONGESTED_MAX;
-
-	if (i > 0)
-		i = fract_exp_two(i, 6);
-
-	rand = get_random_int();
-	i -= bitmap_weight(&rand, BITS_PER_LONG);
-
-	return i > 0 ? i : 1;
-}
-
-static void add_sequential(struct task_struct *t)
-{
-	ewma_add(t->sequential_io_avg,
-		 t->sequential_io, 8, 0);
-
-	t->sequential_io = 0;
-}
-
-static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
-{
-	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
-}
-
-static void check_should_skip(struct cached_dev *dc, struct search *s)
-{
-	struct cache_set *c = s->op.c;
-	struct bio *bio = &s->bio.bio;
-	unsigned mode = cache_mode(dc, bio);
-	unsigned sectors, congested = bch_get_congested(c);
-
-	if (atomic_read(&dc->disk.detaching) ||
-	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
-	    (bio->bi_rw & REQ_DISCARD))
-		goto skip;
-
-	if (mode == CACHE_MODE_NONE ||
-	    (mode == CACHE_MODE_WRITEAROUND &&
-	     (bio->bi_rw & REQ_WRITE)))
-		goto skip;
-
-	if (bio->bi_sector & (c->sb.block_size - 1) ||
-	    bio_sectors(bio) & (c->sb.block_size - 1)) {
-		pr_debug("skipping unaligned io");
-		goto skip;
-	}
-
-	if (!congested && !dc->sequential_cutoff)
-		goto rescale;
-
-	if (!congested &&
-	    mode == CACHE_MODE_WRITEBACK &&
-	    (bio->bi_rw & REQ_WRITE) &&
-	    (bio->bi_rw & REQ_SYNC))
-		goto rescale;
-
-	if (dc->sequential_merge) {
-		struct io *i;
-
-		spin_lock(&dc->io_lock);
-
-		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
-			if (i->last == bio->bi_sector &&
-			    time_before(jiffies, i->jiffies))
-				goto found;
-
-		i = list_first_entry(&dc->io_lru, struct io, lru);
-
-		add_sequential(s->task);
-		i->sequential = 0;
-found:
-		if (i->sequential + bio->bi_size > i->sequential)
-			i->sequential	+= bio->bi_size;
-
-		i->last			 = bio_end_sector(bio);
-		i->jiffies		 = jiffies + msecs_to_jiffies(5000);
-		s->task->sequential_io	 = i->sequential;
-
-		hlist_del(&i->hash);
-		hlist_add_head(&i->hash, iohash(dc, i->last));
-		list_move_tail(&i->lru, &dc->io_lru);
-
-		spin_unlock(&dc->io_lock);
-	} else {
-		s->task->sequential_io = bio->bi_size;
-
-		add_sequential(s->task);
-	}
-
-	sectors = max(s->task->sequential_io,
-		      s->task->sequential_io_avg) >> 9;
-
-	if (dc->sequential_cutoff &&
-	    sectors >= dc->sequential_cutoff >> 9) {
-		trace_bcache_bypass_sequential(s->orig_bio);
-		goto skip;
-	}
-
-	if (congested && sectors >= congested) {
-		trace_bcache_bypass_congested(s->orig_bio);
-		goto skip;
-	}
-
-rescale:
-	bch_rescale_priorities(c, bio_sectors(bio));
-	return;
-skip:
-	bch_mark_sectors_bypassed(s, bio_sectors(bio));
-	s->op.skip = true;
-}
-
 static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct search *s;
@@ -1190,14 +1131,24 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 	if (cached_dev_get(dc)) {
 		s = search_alloc(bio, d);
 
-		trace_bcache_request_start(s, bio);
+		trace_bcache_request_start(s->d, bio);
+
+		if (!bio->bi_size) {
+			/*
+			 * can't call bch_journal_meta from under
+			 * generic_make_request
+			 */
+			continue_at_nobarrier(&s->cl,
+					      cached_dev_nodata,
+					      bcache_wq);
+		} else {
+			s->iop.bypass = check_should_bypass(dc, bio);
 
-		if (!bio_has_data(bio))
-			request_nodata(dc, s);
-		else if (rw)
-			request_write(dc, s);
-		else
-			request_read(dc, s);
+			if (rw)
+				cached_dev_write(dc, s);
+			else
+				cached_dev_read(dc, s);
+		}
 	} else {
 		if ((bio->bi_rw & REQ_DISCARD) &&
 		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
@@ -1272,9 +1223,19 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
 
 	bio_advance(bio, min(sectors << 9, bio->bi_size));
 
 	if (!bio->bi_size)
-		s->op.lookup_done = true;
+		return MAP_DONE;
 
-	return 0;
+	return MAP_CONTINUE;
+}
+
+static void flash_dev_nodata(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+
+	if (s->iop.flush_journal)
+		bch_journal_meta(s->iop.c, cl);
+
+	continue_at(cl, search_free, NULL);
 }
 
 static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
@@ -1293,23 +1254,28 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 	cl = &s->cl;
 	bio = &s->bio.bio;
 
-	trace_bcache_request_start(s, bio);
+	trace_bcache_request_start(s->d, bio);
 
-	if (bio_has_data(bio) && !rw) {
-		closure_call(&s->op.cl, btree_read_async, NULL, cl);
-	} else if (bio_has_data(bio) || s->op.skip) {
-		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
+	if (!bio->bi_size) {
+		/*
+		 * can't call bch_journal_meta from under
+		 * generic_make_request
+		 */
+		continue_at_nobarrier(&s->cl,
+				      flash_dev_nodata,
+				      bcache_wq);
+	} else if (rw) {
+		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
 					&KEY(d->id, bio->bi_sector, 0),
 					&KEY(d->id, bio_end_sector(bio), 0));
 
-		s->writeback	= true;
-		s->op.cache_bio	= bio;
+		s->iop.bypass	= (bio->bi_rw & REQ_DISCARD) != 0;
+		s->iop.writeback = true;
+		s->iop.bio	= bio;
 
-		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
 	} else {
-		/* No data - probably a cache flush */
-		if (s->op.flush_journal)
-			bch_journal_meta(s->op.c, cl);
+		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
 	}
 
 	continue_at(cl, search_free, NULL);
 }