author    Kent Overstreet <kmo@daterainc.com>    2013-07-25 04:44:17 +0400
committer Kent Overstreet <kmo@daterainc.com>    2013-11-11 09:56:08 +0400
commit    c18536a72ddd7fe30d63e6c1500b5c930ac14594 (patch)
tree      0794a00a28c810326b76a36f599e8eee1932008a /drivers/md/bcache/stats.c
parent    cc23196631fbcd1bc3eafedbb712413fdbf946a3 (diff)
download  linux-c18536a72ddd7fe30d63e6c1500b5c930ac14594.tar.xz
bcache: Prune struct btree_op
Eventual goal is for struct btree_op to contain only what is necessary for traversing the btree.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
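The change in this file is mechanical: each accounting helper that used to reach the cache set through the embedded btree_op now takes it directly from struct search. A minimal sketch of the direction the refactor points in, assuming heavily simplified struct definitions (the field lists below are illustrative placeholders, not the real bcache declarations):

/*
 * Illustrative sketch only: the field sets are trimmed down and do not
 * match the real bcache definitions. The point is the ownership move --
 * the cache_set pointer leaves struct btree_op and lives directly in the
 * per-request struct search, so callers write s->c instead of s->op.c.
 */
struct btree_op {
	int			lock;		/* btree locking state needed for traversal */
};

struct search {
	struct btree_op		op;		/* embedded btree traversal state */
	struct cache_set	*c;		/* cache set, formerly reached via op.c */
	struct bcache_device	*d;		/* device this request targets */
	struct bio		*orig_bio;	/* original bio submitted by the caller */
};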
Diffstat (limited to 'drivers/md/bcache/stats.c')
-rw-r--r--  drivers/md/bcache/stats.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index b8730e714d69..ea77263cf7ef 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -200,7 +200,7 @@ void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	mark_cache_stats(&dc->accounting.collector, hit, bypass);
-	mark_cache_stats(&s->op.c->accounting.collector, hit, bypass);
+	mark_cache_stats(&s->c->accounting.collector, hit, bypass);
 #ifdef CONFIG_CGROUP_BCACHE
 	mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
 #endif
@@ -210,21 +210,21 @@ void bch_mark_cache_readahead(struct search *s)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	atomic_inc(&dc->accounting.collector.cache_readaheads);
-	atomic_inc(&s->op.c->accounting.collector.cache_readaheads);
+	atomic_inc(&s->c->accounting.collector.cache_readaheads);
 }
 
 void bch_mark_cache_miss_collision(struct search *s)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	atomic_inc(&dc->accounting.collector.cache_miss_collisions);
-	atomic_inc(&s->op.c->accounting.collector.cache_miss_collisions);
+	atomic_inc(&s->c->accounting.collector.cache_miss_collisions);
 }
 
 void bch_mark_sectors_bypassed(struct search *s, int sectors)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
-	atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
+	atomic_add(sectors, &s->c->accounting.collector.sectors_bypassed);
 }
 
 void bch_cache_accounting_init(struct cache_accounting *acc,