Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/Makefile | 1
-rw-r--r--  drivers/md/bcache/Makefile | 1
-rw-r--r--  drivers/md/bcache/alloc.c | 16
-rw-r--r--  drivers/md/bcache/bcache.h | 20
-rw-r--r--  drivers/md/bcache/bset.c | 1
-rw-r--r--  drivers/md/bcache/bset.h | 1
-rw-r--r--  drivers/md/bcache/btree.c | 18
-rw-r--r--  drivers/md/bcache/btree.h | 3
-rw-r--r--  drivers/md/bcache/closure.c | 4
-rw-r--r--  drivers/md/bcache/closure.h | 7
-rw-r--r--  drivers/md/bcache/debug.c | 1
-rw-r--r--  drivers/md/bcache/debug.h | 1
-rw-r--r--  drivers/md/bcache/extents.c | 1
-rw-r--r--  drivers/md/bcache/extents.h | 1
-rw-r--r--  drivers/md/bcache/io.c | 1
-rw-r--r--  drivers/md/bcache/journal.c | 1
-rw-r--r--  drivers/md/bcache/journal.h | 1
-rw-r--r--  drivers/md/bcache/movinggc.c | 1
-rw-r--r--  drivers/md/bcache/request.c | 37
-rw-r--r--  drivers/md/bcache/request.h | 1
-rw-r--r--  drivers/md/bcache/stats.c | 1
-rw-r--r--  drivers/md/bcache/stats.h | 1
-rw-r--r--  drivers/md/bcache/super.c | 52
-rw-r--r--  drivers/md/bcache/sysfs.c | 29
-rw-r--r--  drivers/md/bcache/sysfs.h | 1
-rw-r--r--  drivers/md/bcache/trace.c | 1
-rw-r--r--  drivers/md/bcache/util.c | 10
-rw-r--r--  drivers/md/bcache/util.h | 5
-rw-r--r--  drivers/md/bcache/writeback.c | 118
-rw-r--r--  drivers/md/bcache/writeback.h | 7
-rw-r--r--  drivers/md/dm-bufio.c | 10
-rw-r--r--  drivers/md/dm-builtin.c | 1
-rw-r--r--  drivers/md/dm-cache-background-tracker.c | 18
-rw-r--r--  drivers/md/dm-cache-metadata.c | 9
-rw-r--r--  drivers/md/dm-cache-policy-smq.c | 42
-rw-r--r--  drivers/md/dm-cache-target.c | 326
-rw-r--r--  drivers/md/dm-core.h | 4
-rw-r--r--  drivers/md/dm-crypt.c | 9
-rw-r--r--  drivers/md/dm-integrity.c | 17
-rw-r--r--  drivers/md/dm-ioctl.c | 37
-rw-r--r--  drivers/md/dm-kcopyd.c | 4
-rw-r--r--  drivers/md/dm-log-writes.c | 175
-rw-r--r--  drivers/md/dm-mpath.c | 20
-rw-r--r--  drivers/md/dm-raid.c | 32
-rw-r--r--  drivers/md/dm-rq.c | 2
-rw-r--r--  drivers/md/dm-stats.c | 37
-rw-r--r--  drivers/md/dm-stats.h | 1
-rw-r--r--  drivers/md/dm-switch.c | 2
-rw-r--r--  drivers/md/dm-table.c | 21
-rw-r--r--  drivers/md/dm-thin.c | 2
-rw-r--r--  drivers/md/dm-verity-target.c | 83
-rw-r--r--  drivers/md/dm-verity.h | 5
-rw-r--r--  drivers/md/dm-zoned-target.c | 13
-rw-r--r--  drivers/md/dm.c | 70
-rw-r--r--  drivers/md/dm.h | 3
-rw-r--r--  drivers/md/md-bitmap.c | 2
-rw-r--r--  drivers/md/md-bitmap.h | 1
-rw-r--r--  drivers/md/md-cluster.h | 1
-rw-r--r--  drivers/md/md-linear.h | 1
-rw-r--r--  drivers/md/md-multipath.h | 1
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/md/persistent-data/Makefile | 1
-rw-r--r--  drivers/md/persistent-data/dm-space-map-metadata.c | 3
-rw-r--r--  drivers/md/raid0.h | 1
-rw-r--r--  drivers/md/raid1-10.c | 1
-rw-r--r--  drivers/md/raid1.h | 1
-rw-r--r--  drivers/md/raid10.h | 1
-rw-r--r--  drivers/md/raid5-log.h | 1
-rw-r--r--  drivers/md/raid5.c | 2
-rw-r--r--  drivers/md/raid5.h | 1
70 files changed, 769 insertions, 537 deletions
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 693602ffdd38..f701bb211783 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel software RAID and LVM drivers.
#
diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile
index c488b846f831..d26b35195825 100644
--- a/drivers/md/bcache/Makefile
+++ b/drivers/md/bcache/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_BCACHE) += bcache.o
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index cacbe2dbd5c3..a27d85232ce1 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Primary bucket allocation code
*
@@ -406,7 +407,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
finish_wait(&ca->set->bucket_wait, &w);
out:
- wake_up_process(ca->alloc_thread);
+ if (ca->alloc_thread)
+ wake_up_process(ca->alloc_thread);
trace_bcache_alloc(ca, reserve);
@@ -441,6 +443,11 @@ out:
b->prio = INITIAL_PRIO;
}
+ if (ca->set->avail_nbuckets > 0) {
+ ca->set->avail_nbuckets--;
+ bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
+ }
+
return r;
}
@@ -448,6 +455,11 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
SET_GC_MARK(b, 0);
SET_GC_SECTORS_USED(b, 0);
+
+ if (ca->set->avail_nbuckets < ca->set->nbuckets) {
+ ca->set->avail_nbuckets++;
+ bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
+ }
}
void bch_bucket_free(struct cache_set *c, struct bkey *k)
@@ -600,7 +612,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
/*
* If we had to allocate, we might race and not need to allocate the
- * second time we call find_data_bucket(). If we allocated a bucket but
+ * second time we call pick_data_bucket(). If we allocated a bucket but
* didn't use it, drop the refcount bch_bucket_alloc_set() took:
*/
if (KEY_PTRS(&alloc.key))
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 2ed9bd231d84..843877e017e1 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_H
#define _BCACHE_H
@@ -184,6 +185,7 @@
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
+#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -265,9 +267,6 @@ struct bcache_device {
atomic_t *stripe_sectors_dirty;
unsigned long *full_dirty_stripes;
- unsigned long sectors_dirty_last;
- long sectors_dirty_derivative;
-
struct bio_set *bio_split;
unsigned data_csum:1;
@@ -299,7 +298,7 @@ struct cached_dev {
struct semaphore sb_write_mutex;
/* Refcount on the cache set. Always nonzero when we're caching. */
- atomic_t count;
+ refcount_t count;
struct work_struct detach;
/*
@@ -362,12 +361,14 @@ struct cached_dev {
uint64_t writeback_rate_target;
int64_t writeback_rate_proportional;
- int64_t writeback_rate_derivative;
- int64_t writeback_rate_change;
+ int64_t writeback_rate_integral;
+ int64_t writeback_rate_integral_scaled;
+ int32_t writeback_rate_change;
unsigned writeback_rate_update_seconds;
- unsigned writeback_rate_d_term;
+ unsigned writeback_rate_i_term_inverse;
unsigned writeback_rate_p_term_inverse;
+ unsigned writeback_rate_minimum;
};
enum alloc_reserve {
@@ -581,6 +582,7 @@ struct cache_set {
uint8_t need_gc;
struct gc_stat gc_stats;
size_t nbuckets;
+ size_t avail_nbuckets;
struct task_struct *gc_thread;
/* Where in the btree gc currently is */
@@ -806,13 +808,13 @@ do { \
static inline void cached_dev_put(struct cached_dev *dc)
{
- if (atomic_dec_and_test(&dc->count))
+ if (refcount_dec_and_test(&dc->count))
schedule_work(&dc->detach);
}
static inline bool cached_dev_get(struct cached_dev *dc)
{
- if (!atomic_inc_not_zero(&dc->count))
+ if (!refcount_inc_not_zero(&dc->count))
return false;
/* Paired with the mb in cached_dev_attach */
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 18526d44688d..e56d3ecdbfcb 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Code for working with individual keys, and sorted sets of keys with in a
* btree node
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index ae964624efb2..fa506c1aa524 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_BSET_H
#define _BCACHE_BSET_H
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 866dcf78ff8e..11c5503d31dc 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
*
@@ -1240,6 +1241,11 @@ void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
__bch_btree_mark_key(c, level, k);
}
+void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
+{
+ stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
+}
+
static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
uint8_t stale = 0;
@@ -1651,9 +1657,8 @@ static void btree_gc_start(struct cache_set *c)
mutex_unlock(&c->bucket_lock);
}
-static size_t bch_btree_gc_finish(struct cache_set *c)
+static void bch_btree_gc_finish(struct cache_set *c)
{
- size_t available = 0;
struct bucket *b;
struct cache *ca;
unsigned i;
@@ -1690,6 +1695,7 @@ static size_t bch_btree_gc_finish(struct cache_set *c)
}
rcu_read_unlock();
+ c->avail_nbuckets = 0;
for_each_cache(ca, c, i) {
uint64_t *i;
@@ -1711,18 +1717,16 @@ static size_t bch_btree_gc_finish(struct cache_set *c)
BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
- available++;
+ c->avail_nbuckets++;
}
}
mutex_unlock(&c->bucket_lock);
- return available;
}
static void bch_btree_gc(struct cache_set *c)
{
int ret;
- unsigned long available;
struct gc_stat stats;
struct closure writes;
struct btree_op op;
@@ -1745,14 +1749,14 @@ static void bch_btree_gc(struct cache_set *c)
pr_warn("gc failed!");
} while (ret);
- available = bch_btree_gc_finish(c);
+ bch_btree_gc_finish(c);
wake_up_allocators(c);
bch_time_stats_update(&c->btree_gc_time, start_time);
stats.key_bytes *= sizeof(uint64_t);
stats.data <<= 9;
- stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
+ bch_update_bucket_in_use(c, &stats);
memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
trace_bcache_gc_end(c);
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 73da1f5626cb..d211e2c25b6b 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_BTREE_H
#define _BCACHE_BTREE_H
@@ -305,5 +306,5 @@ void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
struct keybuf_key *bch_keybuf_next(struct keybuf *);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
struct bkey *, keybuf_pred_fn *);
-
+void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);
#endif
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 7d5286b05036..1841d0359bac 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(closure_put);
void __closure_wake_up(struct closure_waitlist *wait_list)
{
struct llist_node *list;
- struct closure *cl;
+ struct closure *cl, *t;
struct llist_node *reverse = NULL;
list = llist_del_all(&wait_list->list);
@@ -73,7 +73,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
reverse = llist_reverse_order(list);
/* Then do the wakeups */
- llist_for_each_entry(cl, reverse, list) {
+ llist_for_each_entry_safe(cl, t, reverse, list) {
closure_set_waiting(cl, 0);
closure_sub(cl, CLOSURE_WAITING + 1);
}
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 295b7e43f92c..ccfbea6f9f6b 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CLOSURE_H
#define _LINUX_CLOSURE_H
@@ -251,6 +252,12 @@ static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
static inline void closure_queue(struct closure *cl)
{
struct workqueue_struct *wq = cl->wq;
+ /*
+ * Changes made to closure, work_struct, or a couple of other structs
+ * may cause work.func to stop pointing at the right location.
+ */
+ BUILD_BUG_ON(offsetof(struct closure, fn)
+ != offsetof(struct work_struct, func));
if (wq) {
INIT_WORK(&cl->work, cl->work.func);
BUG_ON(!queue_work(wq, &cl->work));
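The BUILD_BUG_ON() added above pins the layout assumption that closure_queue() relies on: cl->fn must sit at the same offset as work.func, so that INIT_WORK(&cl->work, cl->work.func) picks up the closure's function. A minimal user-space sketch of the same compile-time check is below; the struct layouts are stand-ins chosen so the offsets line up, not the real bcache or workqueue definitions.

#include <stddef.h>

/* Stand-in layouts: 'fn' deliberately overlays 'work.func' via the union. */
struct fake_work_struct {
	unsigned long data;
	void (*func)(struct fake_work_struct *);
};

struct fake_closure {
	union {
		struct {
			void *wq;
			void (*fn)(struct fake_closure *);
		};
		struct fake_work_struct work;
	};
};

/* Compilation fails if the two members ever stop sharing an offset. */
_Static_assert(offsetof(struct fake_closure, fn) ==
	       offsetof(struct fake_work_struct, func),
	       "fn must alias work.func");

int main(void)
{
	return 0;
}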
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 61076eda2e6d..c7a02c4900da 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Assorted bcache debug code
*
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
index 1f63c195d247..acc48d3fa274 100644
--- a/drivers/md/bcache/debug.h
+++ b/drivers/md/bcache/debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_DEBUG_H
#define _BCACHE_DEBUG_H
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 243de0bf15cd..41c238fc3733 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
*
diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h
index e2ed54054e7a..0cd3575afa1d 100644
--- a/drivers/md/bcache/extents.h
+++ b/drivers/md/bcache/extents.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_EXTENTS_H
#define _BCACHE_EXTENTS_H
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 7e871bdc0097..fac97ec2d0e2 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Some low level IO code, and hacks for various block layer limitations
*
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 7e1d1c3ba33a..02a98ddb592d 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bcache journalling code, for btree insertions
*
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index e3c39457afbb..b5788199188f 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_JOURNAL_H
#define _BCACHE_JOURNAL_H
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index f633b30c962e..d50c1c97da68 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Moving/copying garbage collector
*
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 681b4f12b05a..3a7aed7282b2 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Main bcache entry point - handle a read or a write request and decide what to
* do with it; the make_request functions are called by the block layer.
@@ -26,12 +27,12 @@ struct kmem_cache *bch_search_cache;
static void bch_data_insert_start(struct closure *);
-static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
+static unsigned cache_mode(struct cached_dev *dc)
{
return BDEV_CACHE_MODE(&dc->sb);
}
-static bool verify(struct cached_dev *dc, struct bio *bio)
+static bool verify(struct cached_dev *dc)
{
return dc->verify;
}
@@ -369,7 +370,7 @@ static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
struct cache_set *c = dc->disk.c;
- unsigned mode = cache_mode(dc, bio);
+ unsigned mode = cache_mode(dc);
unsigned sectors, congested = bch_get_congested(c);
struct task_struct *task = current;
struct io *i;
@@ -384,6 +385,14 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
op_is_write(bio_op(bio))))
goto skip;
+ /*
+ * Flag for bypass if the IO is for read-ahead or background,
+ * unless the read-ahead request is for metadata (e.g., for gfs2).
+ */
+ if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
+ !(bio->bi_opf & REQ_META))
+ goto skip;
+
if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
bio_sectors(bio) & (c->sb.block_size - 1)) {
pr_debug("skipping unaligned io");
@@ -462,6 +471,7 @@ struct search {
unsigned recoverable:1;
unsigned write:1;
unsigned read_dirty_data:1;
+ unsigned cache_missed:1;
unsigned long start_time;
@@ -648,6 +658,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->orig_bio = bio;
s->cache_miss = NULL;
+ s->cache_missed = 0;
s->d = d;
s->recoverable = 1;
s->write = op_is_write(bio_op(bio));
@@ -697,8 +708,16 @@ static void cached_dev_read_error(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
struct bio *bio = &s->bio.bio;
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
- if (s->recoverable) {
+ /*
+ * If the cache device is dirty (dc->has_dirty is non-zero), then
+ * recovering a failed read request from the cached device may return
+ * stale data. So read failure recovery is only permitted when the
+ * cache device is clean.
+ */
+ if (s->recoverable &&
+ (dc && !atomic_read(&dc->has_dirty))) {
/* Retry from the backing device: */
trace_bcache_read_retry(s->orig_bio);
@@ -739,7 +758,7 @@ static void cached_dev_read_done(struct closure *cl)
s->cache_miss = NULL;
}
- if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
+ if (verify(dc) && s->recoverable && !s->read_dirty_data)
bch_data_verify(dc, s->orig_bio);
bio_complete(s);
@@ -759,12 +778,12 @@ static void cached_dev_read_done_bh(struct closure *cl)
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
bch_mark_cache_accounting(s->iop.c, s->d,
- !s->cache_miss, s->iop.bypass);
+ !s->cache_missed, s->iop.bypass);
trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
if (s->iop.status)
continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
- else if (s->iop.bio || verify(dc, &s->bio.bio))
+ else if (s->iop.bio || verify(dc))
continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
else
continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
@@ -778,6 +797,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct bio *miss, *cache_bio;
+ s->cache_missed = 1;
+
if (s->cache_miss || s->iop.bypass) {
miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
@@ -891,7 +912,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
s->iop.bypass = true;
if (should_writeback(dc, s->orig_bio,
- cache_mode(dc, bio),
+ cache_mode(dc),
s->iop.bypass)) {
s->iop.bypass = false;
s->iop.writeback = true;
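The new bypass test in check_should_bypass() above is just a flag check: read-ahead and background IO skip the cache unless it is marked as metadata. A tiny stand-alone rendering of that predicate is below; the REQ_* bit values are illustrative placeholders, not the real block-layer flags.

#include <stdbool.h>
#include <stdio.h>

#define REQ_RAHEAD	(1u << 0)	/* placeholder bit values, not the kernel's */
#define REQ_BACKGROUND	(1u << 1)
#define REQ_META	(1u << 2)

/* Bypass the cache for read-ahead/background IO unless it carries metadata. */
static bool bypass_readahead(unsigned int bi_opf)
{
	return (bi_opf & (REQ_RAHEAD | REQ_BACKGROUND)) && !(bi_opf & REQ_META);
}

int main(void)
{
	printf("%d\n", bypass_readahead(REQ_RAHEAD));			/* 1: plain read-ahead bypasses   */
	printf("%d\n", bypass_readahead(REQ_RAHEAD | REQ_META));	/* 0: metadata read-ahead cached  */
	printf("%d\n", bypass_readahead(0));				/* 0: normal IO is unaffected     */
	return 0;
}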
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 7689176951ce..dea0886b81c1 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_REQUEST_H_
#define _BCACHE_REQUEST_H_
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index 0ca072c20d0d..d0831d5bcc87 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bcache stats code
*
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
index adbff141c887..0b70f9de0c03 100644
--- a/drivers/md/bcache/stats.h
+++ b/drivers/md/bcache/stats.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_STATS_H_
#define _BCACHE_STATS_H_
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index fc0a31b13ac4..b4d28928dec5 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -53,12 +53,15 @@ LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);
static int bcache_major;
-static DEFINE_IDA(bcache_minor);
+static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
-#define BCACHE_MINORS 16 /* partition support */
+/* limit on the number of partitions on a single bcache device */
+#define BCACHE_MINORS 128
+/* limit on the number of bcache devices in a single system */
+#define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS)
/* Superblock */
@@ -721,6 +724,16 @@ static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
closure_get(&c->caching);
}
+static inline int first_minor_to_idx(int first_minor)
+{
+ return (first_minor/BCACHE_MINORS);
+}
+
+static inline int idx_to_first_minor(int idx)
+{
+ return (idx * BCACHE_MINORS);
+}
+
static void bcache_device_free(struct bcache_device *d)
{
lockdep_assert_held(&bch_register_lock);
@@ -734,7 +747,8 @@ static void bcache_device_free(struct bcache_device *d)
if (d->disk && d->disk->queue)
blk_cleanup_queue(d->disk->queue);
if (d->disk) {
- ida_simple_remove(&bcache_minor, d->disk->first_minor);
+ ida_simple_remove(&bcache_device_idx,
+ first_minor_to_idx(d->disk->first_minor));
put_disk(d->disk);
}
@@ -751,7 +765,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
{
struct request_queue *q;
size_t n;
- int minor;
+ int idx;
if (!d->stripe_size)
d->stripe_size = 1 << 31;
@@ -776,25 +790,24 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (!d->full_dirty_stripes)
return -ENOMEM;
- minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
- if (minor < 0)
- return minor;
-
- minor *= BCACHE_MINORS;
+ idx = ida_simple_get(&bcache_device_idx, 0,
+ BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
+ if (idx < 0)
+ return idx;
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio),
BIOSET_NEED_BVECS |
BIOSET_NEED_RESCUER)) ||
!(d->disk = alloc_disk(BCACHE_MINORS))) {
- ida_simple_remove(&bcache_minor, minor);
+ ida_simple_remove(&bcache_device_idx, idx);
return -ENOMEM;
}
set_capacity(d->disk, sectors);
- snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);
+ snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
d->disk->major = bcache_major;
- d->disk->first_minor = minor;
+ d->disk->first_minor = idx_to_first_minor(idx);
d->disk->fops = &bcache_ops;
d->disk->private_data = d;
@@ -889,7 +902,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
closure_init_stack(&cl);
BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
- BUG_ON(atomic_read(&dc->count));
+ BUG_ON(refcount_read(&dc->count));
mutex_lock(&bch_register_lock);
@@ -1016,7 +1029,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
* dc->c must be set before dc->count != 0 - paired with the mb in
* cached_dev_get()
*/
- atomic_set(&dc->count, 1);
+ refcount_set(&dc->count, 1);
/* Block writeback thread, but spawn it */
down_write(&dc->writeback_lock);
@@ -1028,7 +1041,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
bch_sectors_dirty_init(&dc->disk);
atomic_set(&dc->has_dirty, 1);
- atomic_inc(&dc->count);
+ refcount_inc(&dc->count);
bch_writeback_queue(dc);
}
@@ -1129,9 +1142,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
if (ret)
return ret;
- set_capacity(dc->disk.disk,
- dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
-
dc->disk.disk->queue->backing_dev_info->ra_pages =
max(dc->disk.disk->queue->backing_dev_info->ra_pages,
q->backing_dev_info->ra_pages);
@@ -2085,6 +2095,7 @@ static void bcache_exit(void)
if (bcache_major)
unregister_blkdev(bcache_major, "bcache");
unregister_reboot_notifier(&reboot);
+ mutex_destroy(&bch_register_lock);
}
static int __init bcache_init(void)
@@ -2103,14 +2114,15 @@ static int __init bcache_init(void)
bcache_major = register_blkdev(0, "bcache");
if (bcache_major < 0) {
unregister_reboot_notifier(&reboot);
+ mutex_destroy(&bch_register_lock);
return bcache_major;
}
if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
!(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
- sysfs_create_files(bcache_kobj, files) ||
bch_request_init() ||
- bch_debug_init(bcache_kobj))
+ bch_debug_init(bcache_kobj) ||
+ sysfs_create_files(bcache_kobj, files))
goto err;
return 0;
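The bcache_device_idx change above replaces the old allocation of 16 minors per device with a device index that owns BCACHE_MINORS (now 128) minors, so each bcache device can carry up to 128 partitions while the 20-bit minor space still holds 8192 devices. The helpers are simple enough to check in isolation; the sketch below assumes MINORBITS is 20, as in the kernel headers.

#include <assert.h>
#include <stdio.h>

#define MINORBITS		20
#define BCACHE_MINORS		128
#define BCACHE_DEVICE_IDX_MAX	((1U << MINORBITS) / BCACHE_MINORS)

static int first_minor_to_idx(int first_minor)
{
	return first_minor / BCACHE_MINORS;
}

static int idx_to_first_minor(int idx)
{
	return idx * BCACHE_MINORS;
}

int main(void)
{
	/* bcache2 starts at minor 256 and owns minors 256..383 */
	assert(idx_to_first_minor(2) == 256);
	assert(first_minor_to_idx(256) == 2);
	printf("max devices: %u\n", BCACHE_DEVICE_IDX_MAX);	/* 8192 */
	return 0;
}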
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 104c57cd666c..b4184092c727 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bcache sysfs interfaces
*
@@ -81,8 +82,9 @@ rw_attribute(writeback_delay);
rw_attribute(writeback_rate);
rw_attribute(writeback_rate_update_seconds);
-rw_attribute(writeback_rate_d_term);
+rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
+rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);
read_attribute(stripe_size);
@@ -130,15 +132,16 @@ SHOW(__bch_cached_dev)
sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);
var_print(writeback_rate_update_seconds);
- var_print(writeback_rate_d_term);
+ var_print(writeback_rate_i_term_inverse);
var_print(writeback_rate_p_term_inverse);
+ var_print(writeback_rate_minimum);
if (attr == &sysfs_writeback_rate_debug) {
char rate[20];
char dirty[20];
char target[20];
char proportional[20];
- char derivative[20];
+ char integral[20];
char change[20];
s64 next_io;
@@ -146,7 +149,7 @@ SHOW(__bch_cached_dev)
bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
bch_hprint(target, dc->writeback_rate_target << 9);
bch_hprint(proportional,dc->writeback_rate_proportional << 9);
- bch_hprint(derivative, dc->writeback_rate_derivative << 9);
+ bch_hprint(integral, dc->writeback_rate_integral_scaled << 9);
bch_hprint(change, dc->writeback_rate_change << 9);
next_io = div64_s64(dc->writeback_rate.next - local_clock(),
@@ -157,11 +160,11 @@ SHOW(__bch_cached_dev)
"dirty:\t\t%s\n"
"target:\t\t%s\n"
"proportional:\t%s\n"
- "derivative:\t%s\n"
+ "integral:\t%s\n"
"change:\t\t%s/sec\n"
"next io:\t%llims\n",
rate, dirty, target, proportional,
- derivative, change, next_io);
+ integral, change, next_io);
}
sysfs_hprint(dirty_data,
@@ -213,7 +216,7 @@ STORE(__cached_dev)
dc->writeback_rate.rate, 1, INT_MAX);
d_strtoul_nonzero(writeback_rate_update_seconds);
- d_strtoul(writeback_rate_d_term);
+ d_strtoul(writeback_rate_i_term_inverse);
d_strtoul_nonzero(writeback_rate_p_term_inverse);
d_strtoi_h(sequential_cutoff);
@@ -319,7 +322,7 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_writeback_percent,
&sysfs_writeback_rate,
&sysfs_writeback_rate_update_seconds,
- &sysfs_writeback_rate_d_term,
+ &sysfs_writeback_rate_i_term_inverse,
&sysfs_writeback_rate_p_term_inverse,
&sysfs_writeback_rate_debug,
&sysfs_dirty_data,
@@ -745,6 +748,11 @@ static struct attribute *bch_cache_set_internal_files[] = {
};
KTYPE(bch_cache_set_internal);
+static int __bch_cache_cmp(const void *l, const void *r)
+{
+ return *((uint16_t *)r) - *((uint16_t *)l);
+}
+
SHOW(__bch_cache)
{
struct cache *ca = container_of(kobj, struct cache, kobj);
@@ -769,9 +777,6 @@ SHOW(__bch_cache)
CACHE_REPLACEMENT(&ca->sb));
if (attr == &sysfs_priority_stats) {
- int cmp(const void *l, const void *r)
- { return *((uint16_t *) r) - *((uint16_t *) l); }
-
struct bucket *b;
size_t n = ca->sb.nbuckets, i;
size_t unused = 0, available = 0, dirty = 0, meta = 0;
@@ -800,7 +805,7 @@ SHOW(__bch_cache)
p[i] = ca->buckets[i].prio;
mutex_unlock(&ca->set->bucket_lock);
- sort(p, n, sizeof(uint16_t), cmp, NULL);
+ sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);
while (n &&
!cached[n - 1])
diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
index 0526fe92a683..b54fe9602529 100644
--- a/drivers/md/bcache/sysfs.h
+++ b/drivers/md/bcache/sysfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_SYSFS_H_
#define _BCACHE_SYSFS_H_
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c
index b7820b0d2621..a9a73f560c04 100644
--- a/drivers/md/bcache/trace.c
+++ b/drivers/md/bcache/trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "bcache.h"
#include "btree.h"
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 176d3c2ef5f5..e548b8b51322 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -232,8 +232,14 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
d->next += div_u64(done * NSEC_PER_SEC, d->rate);
- if (time_before64(now + NSEC_PER_SEC, d->next))
- d->next = now + NSEC_PER_SEC;
+ /* Bound the time. Don't let us fall further than 2 seconds behind
+ * (this prevents unnecessary backlog that would make it impossible
+ * to catch up). If we're ahead of the desired writeback rate,
+ * don't let us sleep more than 2.5 seconds (so we can notice/respond
+ * if the control system tells us to speed up!).
+ */
+ if (time_before64(now + NSEC_PER_SEC * 5LLU / 2LLU, d->next))
+ d->next = now + NSEC_PER_SEC * 5LLU / 2LLU;
if (time_after64(now - NSEC_PER_SEC * 2, d->next))
d->next = now - NSEC_PER_SEC * 2;
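The widened bound above lets bch_next_delay() schedule its next deadline up to 2.5 seconds ahead instead of 1, so a rate change from the control loop is still noticed promptly, while the existing 2-second backlog cap is kept. A minimal user-space rendering of just that clamp follows; it uses plain unsigned comparisons where the kernel uses time_before64()/time_after64(), which only matters if the timestamps wrap.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL

/* Clamp the ratelimit's "next" deadline into [now - 2s, now + 2.5s]. */
static uint64_t clamp_next(uint64_t next, uint64_t now)
{
	uint64_t ahead  = now + NSEC_PER_SEC * 5 / 2;	/* at most 2.5s of sleep   */
	uint64_t behind = now - NSEC_PER_SEC * 2;	/* at most 2s of backlog   */

	if (next > ahead)
		next = ahead;
	if (next < behind)
		next = behind;
	return next;
}

int main(void)
{
	uint64_t now = 100 * NSEC_PER_SEC;

	/* Way ahead of schedule: sleep capped at 2.5s. */
	printf("%llu ns\n", (unsigned long long)(clamp_next(now + 10 * NSEC_PER_SEC, now) - now));
	/* Way behind schedule: backlog capped at 2s. */
	printf("%llu ns\n", (unsigned long long)(now - clamp_next(now - 10 * NSEC_PER_SEC, now)));
	return 0;
}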
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index cb8d2ccbb6c6..ed5e8a412eb8 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_UTIL_H
#define _BCACHE_UTIL_H
@@ -441,10 +442,10 @@ struct bch_ratelimit {
uint64_t next;
/*
- * Rate at which we want to do work, in units per nanosecond
+ * Rate at which we want to do work, in units per second
* The units here correspond to the units passed to bch_next_delay()
*/
- unsigned rate;
+ uint32_t rate;
};
static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index e663ca082183..56a37884ca8b 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* background writeback - scan btree for dirty data and write it to the backing
* device
@@ -25,48 +26,63 @@ static void __update_writeback_rate(struct cached_dev *dc)
bcache_flash_devs_sectors_dirty(c);
uint64_t cache_dirty_target =
div_u64(cache_sectors * dc->writeback_percent, 100);
-
int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
c->cached_dev_sectors);
- /* PD controller */
-
+ /*
+ * PI controller:
+ * Figures out the amount that should be written per second.
+ *
+ * First, the error (number of sectors that are dirty beyond our
+ * target) is calculated. The error is accumulated (numerically
+ * integrated).
+ *
+ * Then, the proportional value and integral value are scaled
+ * based on configured values. These are stored as inverses to
+ * avoid fixed-point math and to make configuration easy; e.g.
+ * the default value of 40 for writeback_rate_p_term_inverse
+ * attempts to write at a rate that would retire all the dirty
+ * blocks in 40 seconds.
+ *
+ * The writeback_rate_i_term_inverse value of 10000 means that 1/10000th
+ * of the error is accumulated in the integral term per second.
+ * This acts as a slow, long-term average that is not subject to
+ * variations in usage like the p term.
+ */
int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
- int64_t derivative = dirty - dc->disk.sectors_dirty_last;
- int64_t proportional = dirty - target;
- int64_t change;
-
- dc->disk.sectors_dirty_last = dirty;
-
- /* Scale to sectors per second */
-
- proportional *= dc->writeback_rate_update_seconds;
- proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);
-
- derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
-
- derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
- (dc->writeback_rate_d_term /
- dc->writeback_rate_update_seconds) ?: 1, 0);
-
- derivative *= dc->writeback_rate_d_term;
- derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);
-
- change = proportional + derivative;
+ int64_t error = dirty - target;
+ int64_t proportional_scaled =
+ div_s64(error, dc->writeback_rate_p_term_inverse);
+ int64_t integral_scaled;
+ uint32_t new_rate;
+
+ if ((error < 0 && dc->writeback_rate_integral > 0) ||
+ (error > 0 && time_before64(local_clock(),
+ dc->writeback_rate.next + NSEC_PER_MSEC))) {
+ /*
+ * Only decrease the integral term if it's more than
+ * zero. Only increase the integral term if the device
+ * is keeping up. (Don't wind up the integral
+ * ineffectively in either case).
+ *
+ * It's necessary to scale this by
+ * writeback_rate_update_seconds to keep the integral
+ * term dimensioned properly.
+ */
+ dc->writeback_rate_integral += error *
+ dc->writeback_rate_update_seconds;
+ }
- /* Don't increase writeback rate if the device isn't keeping up */
- if (change > 0 &&
- time_after64(local_clock(),
- dc->writeback_rate.next + NSEC_PER_MSEC))
- change = 0;
+ integral_scaled = div_s64(dc->writeback_rate_integral,
+ dc->writeback_rate_i_term_inverse);
- dc->writeback_rate.rate =
- clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
- 1, NSEC_PER_MSEC);
+ new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
+ dc->writeback_rate_minimum, NSEC_PER_SEC);
- dc->writeback_rate_proportional = proportional;
- dc->writeback_rate_derivative = derivative;
- dc->writeback_rate_change = change;
+ dc->writeback_rate_proportional = proportional_scaled;
+ dc->writeback_rate_integral_scaled = integral_scaled;
+ dc->writeback_rate_change = new_rate - dc->writeback_rate.rate;
+ dc->writeback_rate.rate = new_rate;
dc->writeback_rate_target = target;
}
@@ -179,13 +195,21 @@ static void write_dirty(struct closure *cl)
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
- dirty_init(w);
- bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
- io->bio.bi_iter.bi_sector = KEY_START(&w->key);
- bio_set_dev(&io->bio, io->dc->bdev);
- io->bio.bi_end_io = dirty_endio;
+ /*
+ * IO errors are signalled using the dirty bit on the key.
+ * If we failed to read, we should not attempt to write to the
+ * backing device. Instead, immediately go to write_dirty_finish
+ * to clean up.
+ */
+ if (KEY_DIRTY(&w->key)) {
+ dirty_init(w);
+ bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
+ io->bio.bi_iter.bi_sector = KEY_START(&w->key);
+ bio_set_dev(&io->bio, io->dc->bdev);
+ io->bio.bi_end_io = dirty_endio;
- closure_bio_submit(&io->bio, cl);
+ closure_bio_submit(&io->bio, cl);
+ }
continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
@@ -417,6 +441,8 @@ static int bch_writeback_thread(void *arg)
struct cached_dev *dc = arg;
bool searched_full_index;
+ bch_ratelimit_reset(&dc->writeback_rate);
+
while (!kthread_should_stop()) {
down_write(&dc->writeback_lock);
if (!atomic_read(&dc->has_dirty) ||
@@ -444,7 +470,6 @@ static int bch_writeback_thread(void *arg)
up_write(&dc->writeback_lock);
- bch_ratelimit_reset(&dc->writeback_rate);
read_dirty(dc);
if (searched_full_index) {
@@ -454,6 +479,8 @@ static int bch_writeback_thread(void *arg)
!kthread_should_stop() &&
!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
delay = schedule_timeout_interruptible(delay);
+
+ bch_ratelimit_reset(&dc->writeback_rate);
}
}
@@ -491,8 +518,6 @@ void bch_sectors_dirty_init(struct bcache_device *d)
bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
sectors_dirty_init_fn, 0);
-
- d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -506,10 +531,11 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_percent = 10;
dc->writeback_delay = 30;
dc->writeback_rate.rate = 1024;
+ dc->writeback_rate_minimum = 8;
dc->writeback_rate_update_seconds = 5;
- dc->writeback_rate_d_term = 30;
- dc->writeback_rate_p_term_inverse = 6000;
+ dc->writeback_rate_p_term_inverse = 40;
+ dc->writeback_rate_i_term_inverse = 10000;
INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}
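With the new defaults set just above (p_term_inverse = 40, i_term_inverse = 10000, an update every 5 seconds, and a minimum rate of 8), the controller's arithmetic can be followed by hand. The sketch below is an illustrative user-space reduction of __update_writeback_rate(), not the driver code itself: the clamp to NSEC_PER_SEC and the anti-windup condition are copied from the hunk, while the keeping_up flag stands in for the time_before64() check against writeback_rate.next.

#include <stdint.h>
#include <stdio.h>

#define P_TERM_INVERSE	40		/* retire the dirty excess in ~40s */
#define I_TERM_INVERSE	10000		/* 1/10000th of the error accrues per second */
#define UPDATE_SECONDS	5
#define RATE_MINIMUM	8
#define RATE_MAXIMUM	1000000000	/* NSEC_PER_SEC, the upper clamp */

static int64_t integral;		/* accumulated error, in sector-seconds */

/* One controller step: dirty/target in sectors, result in sectors per second. */
static uint32_t update_rate(int64_t dirty, int64_t target, int keeping_up)
{
	int64_t error = dirty - target;
	int64_t proportional = error / P_TERM_INVERSE;
	int64_t rate;

	/* Anti-windup: only wind down a positive integral, and only wind it
	 * up while the device is keeping up with the current rate. */
	if ((error < 0 && integral > 0) || (error > 0 && keeping_up))
		integral += error * UPDATE_SECONDS;

	rate = proportional + integral / I_TERM_INVERSE;
	if (rate < RATE_MINIMUM)
		rate = RATE_MINIMUM;
	if (rate > RATE_MAXIMUM)
		rate = RATE_MAXIMUM;
	return (uint32_t)rate;
}

int main(void)
{
	/* 2097152 sectors (1 GiB) over target: 52428 from P plus 1048 from I. */
	printf("%u sectors/s\n", update_rate(2098176, 1024, 1));
	return 0;
}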
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index e35421d20d2e..a9e3ffb4b03c 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H
@@ -76,7 +77,9 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
if (would_skip)
return false;
- return op_is_sync(bio->bi_opf) || in_use <= CUTOFF_WRITEBACK;
+ return (op_is_sync(bio->bi_opf) ||
+ bio->bi_opf & (REQ_META|REQ_PRIO) ||
+ in_use <= CUTOFF_WRITEBACK);
}
static inline void bch_writeback_queue(struct cached_dev *dc)
@@ -89,7 +92,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
{
if (!atomic_read(&dc->has_dirty) &&
!atomic_xchg(&dc->has_dirty, 1)) {
- atomic_inc(&dc->count);
+ refcount_inc(&dc->count);
if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index d216a8f7bc22..33bb074d6941 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -347,7 +347,7 @@ static void __cache_size_refresh(void)
BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
BUG_ON(dm_bufio_client_count < 0);
- dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
+ dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
/*
* Use default if set to 0 and report the actual cache size used.
@@ -960,7 +960,7 @@ static void __get_memory_limit(struct dm_bufio_client *c,
{
unsigned long buffers;
- if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
+ if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
if (mutex_trylock(&dm_bufio_clients_lock)) {
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
@@ -1600,7 +1600,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
- unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+ unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
}
@@ -1647,7 +1647,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
- return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
+ return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
}
/*
@@ -1818,7 +1818,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
static unsigned get_max_age_hz(void)
{
- unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
+ unsigned max_age = READ_ONCE(dm_bufio_max_age);
if (max_age > UINT_MAX / HZ)
max_age = UINT_MAX / HZ;
diff --git a/drivers/md/dm-builtin.c b/drivers/md/dm-builtin.c
index f092771878c2..8eb52e425141 100644
--- a/drivers/md/dm-builtin.c
+++ b/drivers/md/dm-builtin.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "dm-core.h"
/*
diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c
index 707233891291..1d0af0a21fc7 100644
--- a/drivers/md/dm-cache-background-tracker.c
+++ b/drivers/md/dm-cache-background-tracker.c
@@ -161,8 +161,17 @@ EXPORT_SYMBOL_GPL(btracker_nr_demotions_queued);
static bool max_work_reached(struct background_tracker *b)
{
- // FIXME: finish
- return false;
+ return atomic_read(&b->pending_promotes) +
+ atomic_read(&b->pending_writebacks) +
+ atomic_read(&b->pending_demotes) >= b->max_work;
+}
+
+struct bt_work *alloc_work(struct background_tracker *b)
+{
+ if (max_work_reached(b))
+ return NULL;
+
+ return kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
}
int btracker_queue(struct background_tracker *b,
@@ -174,10 +183,7 @@ int btracker_queue(struct background_tracker *b,
if (pwork)
*pwork = NULL;
- if (max_work_reached(b))
- return -ENOMEM;
-
- w = kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
+ w = alloc_work(b);
if (!w)
return -ENOMEM;
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 4a4e9c75fc4c..0d7212410e21 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -13,6 +13,7 @@
#include "persistent-data/dm-transaction-manager.h"
#include <linux/device-mapper.h>
+#include <linux/refcount.h>
/*----------------------------------------------------------------*/
@@ -100,7 +101,7 @@ struct cache_disk_superblock {
} __packed;
struct dm_cache_metadata {
- atomic_t ref_count;
+ refcount_t ref_count;
struct list_head list;
unsigned version;
@@ -753,7 +754,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
}
cmd->version = metadata_version;
- atomic_set(&cmd->ref_count, 1);
+ refcount_set(&cmd->ref_count, 1);
init_rwsem(&cmd->root_lock);
cmd->bdev = bdev;
cmd->data_block_size = data_block_size;
@@ -791,7 +792,7 @@ static struct dm_cache_metadata *lookup(struct block_device *bdev)
list_for_each_entry(cmd, &table, list)
if (cmd->bdev == bdev) {
- atomic_inc(&cmd->ref_count);
+ refcount_inc(&cmd->ref_count);
return cmd;
}
@@ -862,7 +863,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
{
- if (atomic_dec_and_test(&cmd->ref_count)) {
+ if (refcount_dec_and_test(&cmd->ref_count)) {
mutex_lock(&table_lock);
list_del(&cmd->list);
mutex_unlock(&table_lock);
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index e5eb9c9b4bc8..4ab23d0075f6 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -213,6 +213,19 @@ static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
l->nr_elts--;
}
+static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
+{
+ struct entry *e;
+
+ for (e = l_head(es, l); e; e = l_next(es, e))
+ if (!e->sentinel) {
+ l_del(es, l, e);
+ return e;
+ }
+
+ return NULL;
+}
+
static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
{
struct entry *e;
@@ -719,7 +732,7 @@ static struct entry *alloc_entry(struct entry_alloc *ea)
if (l_empty(&ea->free))
return NULL;
- e = l_pop_tail(ea->es, &ea->free);
+ e = l_pop_head(ea->es, &ea->free);
init_entry(e);
ea->nr_allocated++;
@@ -1158,13 +1171,13 @@ static void clear_pending(struct smq_policy *mq, struct entry *e)
e->pending_work = false;
}
-static void queue_writeback(struct smq_policy *mq)
+static void queue_writeback(struct smq_policy *mq, bool idle)
{
int r;
struct policy_work work;
struct entry *e;
- e = q_peek(&mq->dirty, mq->dirty.nr_levels, !mq->migrations_allowed);
+ e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle);
if (e) {
mark_pending(mq, e);
q_del(&mq->dirty, e);
@@ -1174,12 +1187,16 @@ static void queue_writeback(struct smq_policy *mq)
work.cblock = infer_cblock(mq, e);
r = btracker_queue(mq->bg_work, &work, NULL);
- WARN_ON_ONCE(r); // FIXME: finish, I think we have to get rid of this race.
+ if (r) {
+ clear_pending(mq, e);
+ q_push_front(&mq->dirty, e);
+ }
}
}
static void queue_demotion(struct smq_policy *mq)
{
+ int r;
struct policy_work work;
struct entry *e;
@@ -1189,7 +1206,7 @@ static void queue_demotion(struct smq_policy *mq)
e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
if (!e) {
if (!clean_target_met(mq, true))
- queue_writeback(mq);
+ queue_writeback(mq, false);
return;
}
@@ -1199,12 +1216,17 @@ static void queue_demotion(struct smq_policy *mq)
work.op = POLICY_DEMOTE;
work.oblock = e->oblock;
work.cblock = infer_cblock(mq, e);
- btracker_queue(mq->bg_work, &work, NULL);
+ r = btracker_queue(mq->bg_work, &work, NULL);
+ if (r) {
+ clear_pending(mq, e);
+ q_push_front(&mq->clean, e);
+ }
}
static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
struct policy_work **workp)
{
+ int r;
struct entry *e;
struct policy_work work;
@@ -1234,7 +1256,9 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
work.op = POLICY_PROMOTE;
work.oblock = oblock;
work.cblock = infer_cblock(mq, e);
- btracker_queue(mq->bg_work, &work, workp);
+ r = btracker_queue(mq->bg_work, &work, workp);
+ if (r)
+ free_entry(&mq->cache_alloc, e);
}
/*----------------------------------------------------------------*/
@@ -1418,7 +1442,7 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
r = btracker_issue(mq->bg_work, result);
if (r == -ENODATA) {
if (!clean_target_met(mq, idle)) {
- queue_writeback(mq);
+ queue_writeback(mq, idle);
r = btracker_issue(mq->bg_work, result);
}
}
@@ -1778,7 +1802,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
mq->next_hotspot_period = jiffies;
mq->next_cache_period = jiffies;
- mq->bg_work = btracker_create(10240); /* FIXME: hard coded value */
+ mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */
if (!mq->bg_work)
goto bad_btracker;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 8785134c9f1f..cf23a14f9c6a 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -408,9 +408,7 @@ struct cache {
int sectors_per_block_shift;
spinlock_t lock;
- struct list_head deferred_cells;
struct bio_list deferred_bios;
- struct bio_list deferred_writethrough_bios;
sector_t migration_threshold;
wait_queue_head_t migration_wait;
atomic_t nr_allocated_migrations;
@@ -446,10 +444,10 @@ struct cache {
struct dm_kcopyd_client *copier;
struct workqueue_struct *wq;
struct work_struct deferred_bio_worker;
- struct work_struct deferred_writethrough_worker;
struct work_struct migration_worker;
struct delayed_work waker;
struct dm_bio_prison_v2 *prison;
+ struct bio_set *bs;
mempool_t *migration_pool;
@@ -490,15 +488,6 @@ struct per_bio_data {
struct dm_bio_prison_cell_v2 *cell;
struct dm_hook_info hook_info;
sector_t len;
-
- /*
- * writethrough fields. These MUST remain at the end of this
- * structure and the 'cache' member must be the first as it
- * is used to determine the offset of the writethrough fields.
- */
- struct cache *cache;
- dm_cblock_t cblock;
- struct dm_bio_details bio_details;
};
struct dm_cache_migration {
@@ -515,19 +504,19 @@ struct dm_cache_migration {
/*----------------------------------------------------------------*/
-static bool writethrough_mode(struct cache_features *f)
+static bool writethrough_mode(struct cache *cache)
{
- return f->io_mode == CM_IO_WRITETHROUGH;
+ return cache->features.io_mode == CM_IO_WRITETHROUGH;
}
-static bool writeback_mode(struct cache_features *f)
+static bool writeback_mode(struct cache *cache)
{
- return f->io_mode == CM_IO_WRITEBACK;
+ return cache->features.io_mode == CM_IO_WRITEBACK;
}
-static inline bool passthrough_mode(struct cache_features *f)
+static inline bool passthrough_mode(struct cache *cache)
{
- return unlikely(f->io_mode == CM_IO_PASSTHROUGH);
+ return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
}
/*----------------------------------------------------------------*/
@@ -537,14 +526,9 @@ static void wake_deferred_bio_worker(struct cache *cache)
queue_work(cache->wq, &cache->deferred_bio_worker);
}
-static void wake_deferred_writethrough_worker(struct cache *cache)
-{
- queue_work(cache->wq, &cache->deferred_writethrough_worker);
-}
-
static void wake_migration_worker(struct cache *cache)
{
- if (passthrough_mode(&cache->features))
+ if (passthrough_mode(cache))
return;
queue_work(cache->wq, &cache->migration_worker);
@@ -567,10 +551,13 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
struct dm_cache_migration *mg;
mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
- if (mg) {
- mg->cache = cache;
- atomic_inc(&mg->cache->nr_allocated_migrations);
- }
+ if (!mg)
+ return NULL;
+
+ memset(mg, 0, sizeof(*mg));
+
+ mg->cache = cache;
+ atomic_inc(&cache->nr_allocated_migrations);
return mg;
}
@@ -618,27 +605,16 @@ static unsigned lock_level(struct bio *bio)
* Per bio data
*--------------------------------------------------------------*/
-/*
- * If using writeback, leave out struct per_bio_data's writethrough fields.
- */
-#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
-#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
-
-static size_t get_per_bio_data_size(struct cache *cache)
+static struct per_bio_data *get_per_bio_data(struct bio *bio)
{
- return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
-}
-
-static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
-{
- struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
BUG_ON(!pb);
return pb;
}
-static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
+static struct per_bio_data *init_per_bio_data(struct bio *bio)
{
- struct per_bio_data *pb = get_per_bio_data(bio, data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
pb->tick = false;
pb->req_nr = dm_bio_get_target_bio_nr(bio);
@@ -678,7 +654,6 @@ static void defer_bios(struct cache *cache, struct bio_list *bios)
static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
{
bool r;
- size_t pb_size;
struct per_bio_data *pb;
struct dm_cell_key_v2 key;
dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
@@ -703,8 +678,7 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
if (cell != cell_prealloc)
free_prison_cell(cache, cell_prealloc);
- pb_size = get_per_bio_data_size(cache);
- pb = get_per_bio_data(bio, pb_size);
+ pb = get_per_bio_data(bio);
pb->cell = cell;
return r;
@@ -856,28 +830,35 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
unsigned long flags;
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb;
spin_lock_irqsave(&cache->lock, flags);
if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
bio_op(bio) != REQ_OP_DISCARD) {
+ pb = get_per_bio_data(bio);
pb->tick = true;
cache->need_tick_bio = false;
}
spin_unlock_irqrestore(&cache->lock, flags);
}
-static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
- dm_oblock_t oblock)
+static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock, bool bio_has_pbd)
{
- // FIXME: this is called way too much.
- check_if_tick_bio_needed(cache, bio);
+ if (bio_has_pbd)
+ check_if_tick_bio_needed(cache, bio);
remap_to_origin(cache, bio);
if (bio_data_dir(bio) == WRITE)
clear_discard(cache, oblock_to_dblock(cache, oblock));
}
+static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock)
+{
+ // FIXME: check_if_tick_bio_needed() is called way too much through this interface
+ __remap_to_origin_clear_discard(cache, bio, oblock, true);
+}
+
static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{
@@ -908,10 +889,10 @@ static bool accountable_bio(struct cache *cache, struct bio *bio)
static void accounted_begin(struct cache *cache, struct bio *bio)
{
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb;
if (accountable_bio(cache, bio)) {
+ pb = get_per_bio_data(bio);
pb->len = bio_sectors(bio);
iot_io_begin(&cache->tracker, pb->len);
}
@@ -919,8 +900,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio)
static void accounted_complete(struct cache *cache, struct bio *bio)
{
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
iot_io_end(&cache->tracker, pb->len);
}
@@ -937,57 +917,26 @@ static void issue_op(struct bio *bio, void *context)
accounted_request(cache, bio);
}
-static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
- bio_list_add(&cache->deferred_writethrough_bios, bio);
- spin_unlock_irqrestore(&cache->lock, flags);
-
- wake_deferred_writethrough_worker(cache);
-}
-
-static void writethrough_endio(struct bio *bio)
-{
- struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
-
- dm_unhook_bio(&pb->hook_info, bio);
-
- if (bio->bi_status) {
- bio_endio(bio);
- return;
- }
-
- dm_bio_restore(&pb->bio_details, bio);
- remap_to_cache(pb->cache, bio, pb->cblock);
-
- /*
- * We can't issue this bio directly, since we're in interrupt
- * context. So it gets put on a bio list for processing by the
- * worker thread.
- */
- defer_writethrough_bio(pb->cache, bio);
-}
-
/*
- * FIXME: send in parallel, huge latency as is.
* When running in writethrough mode we need to send writes to clean blocks
- * to both the cache and origin devices. In future we'd like to clone the
- * bio and send them in parallel, but for now we're doing them in
- * series as this is easier.
+ * to both the cache and origin devices. Clone the bio and send them in parallel.
*/
-static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
- dm_oblock_t oblock, dm_cblock_t cblock)
+static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock, dm_cblock_t cblock)
{
- struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
+ struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, cache->bs);
+
+ BUG_ON(!origin_bio);
- pb->cache = cache;
- pb->cblock = cblock;
- dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
- dm_bio_record(&pb->bio_details, bio);
+ bio_chain(origin_bio, bio);
+ /*
+ * Passing false to __remap_to_origin_clear_discard() skips
+ * all code that might use per_bio_data (since clone doesn't have it)
+ */
+ __remap_to_origin_clear_discard(cache, origin_bio, oblock, false);
+ submit_bio(origin_bio);
- remap_to_origin_clear_discard(pb->cache, bio, oblock);
+ remap_to_cache(cache, bio, cblock);
}
/*----------------------------------------------------------------
@@ -1201,6 +1150,18 @@ static void background_work_end(struct cache *cache)
/*----------------------------------------------------------------*/
+static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
+{
+ return (bio_data_dir(bio) == WRITE) &&
+ (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
+}
+
+static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
+{
+ return writeback_mode(cache) &&
+ (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
+}
+
static void quiesce(struct dm_cache_migration *mg,
void (*continuation)(struct work_struct *))
{
@@ -1248,8 +1209,7 @@ static int copy(struct dm_cache_migration *mg, bool promote)
static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
{
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell))
free_prison_cell(cache, pb->cell);
@@ -1260,23 +1220,21 @@ static void overwrite_endio(struct bio *bio)
{
struct dm_cache_migration *mg = bio->bi_private;
struct cache *cache = mg->cache;
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
dm_unhook_bio(&pb->hook_info, bio);
if (bio->bi_status)
mg->k.input = bio->bi_status;
- queue_continuation(mg->cache->wq, &mg->k);
+ queue_continuation(cache->wq, &mg->k);
}
static void overwrite(struct dm_cache_migration *mg,
void (*continuation)(struct work_struct *))
{
struct bio *bio = mg->overwrite_bio;
- size_t pb_data_size = get_per_bio_data_size(mg->cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
@@ -1474,13 +1432,51 @@ static void mg_upgrade_lock(struct work_struct *ws)
}
}
+static void mg_full_copy(struct work_struct *ws)
+{
+ struct dm_cache_migration *mg = ws_to_mg(ws);
+ struct cache *cache = mg->cache;
+ struct policy_work *op = mg->op;
+ bool is_policy_promote = (op->op == POLICY_PROMOTE);
+
+ if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
+ is_discarded_oblock(cache, op->oblock)) {
+ mg_upgrade_lock(ws);
+ return;
+ }
+
+ init_continuation(&mg->k, mg_upgrade_lock);
+
+ if (copy(mg, is_policy_promote)) {
+ DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
+ mg->k.input = BLK_STS_IOERR;
+ mg_complete(mg, false);
+ }
+}
+
static void mg_copy(struct work_struct *ws)
{
- int r;
struct dm_cache_migration *mg = ws_to_mg(ws);
if (mg->overwrite_bio) {
/*
+ * No exclusive lock was held when we last checked if the bio
+ * was optimisable. So we have to check again in case things
+ * have changed (eg, the block may no longer be discarded).
+ */
+ if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
+ /*
+ * Fallback to a real full copy after doing some tidying up.
+ */
+ bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
+ BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
+ mg->overwrite_bio = NULL;
+ inc_io_migrations(mg->cache);
+ mg_full_copy(ws);
+ return;
+ }
+
+ /*
* It's safe to do this here, even though it's new data
* because all IO has been locked out of the block.
*
@@ -1489,26 +1485,8 @@ static void mg_copy(struct work_struct *ws)
*/
overwrite(mg, mg_update_metadata_after_copy);
- } else {
- struct cache *cache = mg->cache;
- struct policy_work *op = mg->op;
- bool is_policy_promote = (op->op == POLICY_PROMOTE);
-
- if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
- is_discarded_oblock(cache, op->oblock)) {
- mg_upgrade_lock(ws);
- return;
- }
-
- init_continuation(&mg->k, mg_upgrade_lock);
-
- r = copy(mg, is_policy_promote);
- if (r) {
- DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
- mg->k.input = BLK_STS_IOERR;
- mg_complete(mg, false);
- }
- }
+ } else
+ mg_full_copy(ws);
}
static int mg_lock_writes(struct dm_cache_migration *mg)
@@ -1567,9 +1545,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
return -ENOMEM;
}
- memset(mg, 0, sizeof(*mg));
-
- mg->cache = cache;
mg->op = op;
mg->overwrite_bio = bio;
@@ -1703,9 +1678,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
return -ENOMEM;
}
- memset(mg, 0, sizeof(*mg));
-
- mg->cache = cache;
mg->overwrite_bio = bio;
mg->invalidate_cblock = cblock;
mg->invalidate_oblock = oblock;
@@ -1748,26 +1720,12 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
/*----------------------------------------------------------------*/
-static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
-{
- return (bio_data_dir(bio) == WRITE) &&
- (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
-}
-
-static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
-{
- return writeback_mode(&cache->features) &&
- (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
-}
-
static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
bool *commit_needed)
{
int r, data_dir;
bool rb, background_queued;
dm_cblock_t cblock;
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
*commit_needed = false;
@@ -1816,6 +1774,8 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
}
if (r == -ENOENT) {
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
/*
* Miss.
*/
@@ -1823,7 +1783,6 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
if (pb->req_nr == 0) {
accounted_begin(cache, bio);
remap_to_origin_clear_discard(cache, bio, block);
-
} else {
/*
* This is a duplicate writethrough io that is no
@@ -1842,18 +1801,17 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
* Passthrough always maps to the origin, invalidating any
* cache blocks that are written to.
*/
- if (passthrough_mode(&cache->features)) {
+ if (passthrough_mode(cache)) {
if (bio_data_dir(bio) == WRITE) {
bio_drop_shared_lock(cache, bio);
atomic_inc(&cache->stats.demotion);
invalidate_start(cache, cblock, block, bio);
} else
remap_to_origin_clear_discard(cache, bio, block);
-
} else {
- if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
+ if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
!is_dirty(cache, cblock)) {
- remap_to_origin_then_cache(cache, bio, block, cblock);
+ remap_to_origin_and_cache(cache, bio, block, cblock);
accounted_begin(cache, bio);
} else
remap_to_cache_dirty(cache, bio, block, cblock);
@@ -1922,8 +1880,7 @@ static blk_status_t commit_op(void *context)
static bool process_flush_bio(struct cache *cache, struct bio *bio)
{
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
if (!pb->req_nr)
remap_to_origin(cache, bio);
@@ -1983,28 +1940,6 @@ static void process_deferred_bios(struct work_struct *ws)
schedule_commit(&cache->committer);
}
-static void process_deferred_writethrough_bios(struct work_struct *ws)
-{
- struct cache *cache = container_of(ws, struct cache, deferred_writethrough_worker);
-
- unsigned long flags;
- struct bio_list bios;
- struct bio *bio;
-
- bio_list_init(&bios);
-
- spin_lock_irqsave(&cache->lock, flags);
- bio_list_merge(&bios, &cache->deferred_writethrough_bios);
- bio_list_init(&cache->deferred_writethrough_bios);
- spin_unlock_irqrestore(&cache->lock, flags);
-
- /*
- * These bios have already been through accounted_begin()
- */
- while ((bio = bio_list_pop(&bios)))
- generic_make_request(bio);
-}
-
/*----------------------------------------------------------------
* Main worker loop
*--------------------------------------------------------------*/
@@ -2112,6 +2047,9 @@ static void destroy(struct cache *cache)
kfree(cache->ctr_args[i]);
kfree(cache->ctr_args);
+ if (cache->bs)
+ bioset_free(cache->bs);
+
kfree(cache);
}
@@ -2555,8 +2493,15 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->discards_supported = true;
ti->split_discard_bios = false;
+ ti->per_io_data_size = sizeof(struct per_bio_data);
+
cache->features = ca->features;
- ti->per_io_data_size = get_per_bio_data_size(cache);
+ if (writethrough_mode(cache)) {
+ /* Create bioset for writethrough bios issued to origin */
+ cache->bs = bioset_create(BIO_POOL_SIZE, 0, 0);
+ if (!cache->bs)
+ goto bad;
+ }
cache->callbacks.congested_fn = cache_is_congested;
dm_table_add_target_callbacks(ti->table, &cache->callbacks);
@@ -2618,7 +2563,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}
- if (passthrough_mode(&cache->features)) {
+ if (passthrough_mode(cache)) {
bool all_clean;
r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
@@ -2637,9 +2582,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
}
spin_lock_init(&cache->lock);
- INIT_LIST_HEAD(&cache->deferred_cells);
bio_list_init(&cache->deferred_bios);
- bio_list_init(&cache->deferred_writethrough_bios);
atomic_set(&cache->nr_allocated_migrations, 0);
atomic_set(&cache->nr_io_migrations, 0);
init_waitqueue_head(&cache->migration_wait);
@@ -2678,8 +2621,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}
INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios);
- INIT_WORK(&cache->deferred_writethrough_worker,
- process_deferred_writethrough_bios);
INIT_WORK(&cache->migration_worker, check_migrations);
INIT_DELAYED_WORK(&cache->waker, do_waker);
@@ -2795,9 +2736,8 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
int r;
bool commit_needed;
dm_oblock_t block = get_bio_block(cache, bio);
- size_t pb_data_size = get_per_bio_data_size(cache);
- init_per_bio_data(bio, pb_data_size);
+ init_per_bio_data(bio);
if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
/*
* This can only occur if the io goes to a partial block at
@@ -2821,13 +2761,11 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
return r;
}
-static int cache_end_io(struct dm_target *ti, struct bio *bio,
- blk_status_t *error)
+static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
struct cache *cache = ti->private;
unsigned long flags;
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
if (pb->tick) {
policy_tick(cache->policy, false);
@@ -3243,13 +3181,13 @@ static void cache_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("1 ");
- if (writethrough_mode(&cache->features))
+ if (writethrough_mode(cache))
DMEMIT("writethrough ");
- else if (passthrough_mode(&cache->features))
+ else if (passthrough_mode(cache))
DMEMIT("passthrough ");
- else if (writeback_mode(&cache->features))
+ else if (writeback_mode(cache))
DMEMIT("writeback ");
else {
@@ -3415,7 +3353,7 @@ static int process_invalidate_cblocks_message(struct cache *cache, unsigned coun
unsigned i;
struct cblock_range range;
- if (!passthrough_mode(&cache->features)) {
+ if (!passthrough_mode(cache)) {
DMERR("%s: cache has to be in passthrough mode for invalidation",
cache_device_name(cache));
return -EPERM;
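The cache target's per-bio data is now a fixed size (ti->per_io_data_size = sizeof(struct per_bio_data) above), so the accessors no longer need a size argument. A minimal sketch of what the simplified helpers could look like, built on the generic dm_per_bio_data() and dm_bio_get_target_bio_nr() helpers from <linux/device-mapper.h>; the struct layout and field defaults shown here are illustrative, not the target's exact code:

#include <linux/device-mapper.h>

struct dm_bio_prison_cell_v2;

struct per_bio_data {
        bool tick;
        unsigned req_nr;
        struct dm_bio_prison_cell_v2 *cell;    /* trimmed; the real struct carries more state */
};

static struct per_bio_data *get_per_bio_data(struct bio *bio)
{
        /* the data lives in the space DM reserved via ti->per_io_data_size */
        return dm_per_bio_data(bio, sizeof(struct per_bio_data));
}

static struct per_bio_data *init_per_bio_data(struct bio *bio)
{
        struct per_bio_data *pb = get_per_bio_data(bio);

        pb->tick = false;
        pb->req_nr = dm_bio_get_target_bio_nr(bio);
        pb->cell = NULL;

        return pb;
}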
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 24eddbdf2ab4..6a14f945783c 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -29,7 +29,6 @@ struct dm_kobject_holder {
* DM targets must _not_ dereference a mapped_device to directly access its members!
*/
struct mapped_device {
- struct srcu_struct io_barrier;
struct mutex suspend_lock;
/*
@@ -127,6 +126,8 @@ struct mapped_device {
struct blk_mq_tag_set *tag_set;
bool use_blk_mq:1;
bool init_tio_pdu:1;
+
+ struct srcu_struct io_barrier;
};
void dm_init_md_queue(struct mapped_device *md);
@@ -149,5 +150,6 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
+void dm_issue_global_event(void);
#endif
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a55ffd4f5933..9fc12f556534 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1075,7 +1075,7 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
/* Reject unexpected unaligned bio. */
- if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
+ if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
return -EIO;
dmreq = dmreq_of_req(cc, req);
@@ -1168,7 +1168,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
int r = 0;
/* Reject unexpected unaligned bio. */
- if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
+ if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
return -EIO;
dmreq = dmreq_of_req(cc, req);
@@ -2466,6 +2466,7 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
kfree(cipher_api);
return ret;
}
+ kfree(cipher_api);
return 0;
bad_mem:
@@ -2584,6 +2585,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
ti->error = "Invalid feature value for sector_size";
return -EINVAL;
}
+ if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
+ ti->error = "Device size is not multiple of sector_size feature";
+ return -EINVAL;
+ }
cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
} else if (!strcasecmp(opt_string, "iv_large_sectors"))
set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
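Both dm-crypt hunks above lean on cc->sector_size being a power of two (the option parser already enforces that), so "is this a multiple of the encryption sector size?" reduces to a mask test. A small, self-contained sketch of the arithmetic with made-up numbers rather than values taken from the driver:

#include <linux/blkdev.h>       /* sector_t, SECTOR_SHIFT */

/* Returns 0 when ti_len (in 512-byte sectors) is a whole number of
 * sector_size-byte encryption sectors; sector_size must be a power of two. */
static int check_len_multiple(sector_t ti_len, unsigned int sector_size)
{
        if (ti_len & ((sector_size >> SECTOR_SHIFT) - 1))
                return -EINVAL;
        return 0;
}

/*
 * check_len_multiple(1000000, 4096) == 0        (1000000 & 7 == 0)
 * check_len_multiple(1000005, 4096) == -EINVAL  (1000005 & 7 == 5)
 */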
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 096fe9b66c50..61180783ef42 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -6,6 +6,7 @@
* This file is released under the GPL.
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
@@ -80,13 +81,13 @@ struct journal_entry {
#define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
#if BITS_PER_LONG == 64
-#define journal_entry_set_sector(je, x) do { smp_wmb(); ACCESS_ONCE((je)->u.sector) = cpu_to_le64(x); } while (0)
+#define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#elif defined(CONFIG_LBDAF)
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32((x) >> 32); } while (0)
+#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#else
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32(0); } while (0)
+#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0)
#define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo)
#endif
#define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1))
@@ -320,7 +321,7 @@ static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, in
static int dm_integrity_failed(struct dm_integrity_c *ic)
{
- return ACCESS_ONCE(ic->failed);
+ return READ_ONCE(ic->failed);
}
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
@@ -1376,7 +1377,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
struct bvec_iter iter;
struct bio_vec bv;
bio_for_each_segment(bv, bio, iter) {
- if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
+ if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
bv.bv_offset, bv.bv_len, ic->sectors_per_block);
return DM_MAPIO_KILL;
@@ -1545,7 +1546,7 @@ retry_kmap:
smp_mb();
if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
wake_up(&ic->copy_to_journal_wait);
- if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
+ if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
queue_work(ic->commit_wq, &ic->commit_work);
} else {
schedule_autocommit(ic);
@@ -1798,7 +1799,7 @@ static void integrity_commit(struct work_struct *w)
ic->n_committed_sections += commit_sections;
spin_unlock_irq(&ic->endio_wait.lock);
- if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
+ if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
queue_work(ic->writer_wq, &ic->writer_work);
release_flush_bios:
@@ -1980,7 +1981,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
- if (ACCESS_ONCE(ic->suspending))
+ if (READ_ONCE(ic->suspending))
return;
spin_lock_irq(&ic->endio_wait.lock);
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 8756a6850431..e52676fa9832 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -477,9 +477,13 @@ static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_si
* Round up the ptr to an 8-byte boundary.
*/
#define ALIGN_MASK 7
+static inline size_t align_val(size_t val)
+{
+ return (val + ALIGN_MASK) & ~ALIGN_MASK;
+}
static inline void *align_ptr(void *ptr)
{
- return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
+ return (void *)align_val((size_t)ptr);
}
/*
@@ -505,7 +509,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
struct hash_cell *hc;
size_t len, needed = 0;
struct gendisk *disk;
- struct dm_name_list *nl, *old_nl = NULL;
+ struct dm_name_list *orig_nl, *nl, *old_nl = NULL;
uint32_t *event_nr;
down_write(&_hash_lock);
@@ -516,17 +520,15 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
*/
for (i = 0; i < NUM_BUCKETS; i++) {
list_for_each_entry (hc, _name_buckets + i, name_list) {
- needed += sizeof(struct dm_name_list);
- needed += strlen(hc->name) + 1;
- needed += ALIGN_MASK;
- needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK;
+ needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
+ needed += align_val(sizeof(uint32_t));
}
}
/*
* Grab our output buffer.
*/
- nl = get_result_buffer(param, param_size, &len);
+ nl = orig_nl = get_result_buffer(param, param_size, &len);
if (len < needed) {
param->flags |= DM_BUFFER_FULL_FLAG;
goto out;
@@ -549,11 +551,16 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
strcpy(nl->name, hc->name);
old_nl = nl;
- event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1);
+ event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
*event_nr = dm_get_event_nr(hc->md);
nl = align_ptr(event_nr + 1);
}
}
+ /*
+ * If a mismatch happens, security may be compromised due to buffer
+ * overflow, so it's better to crash.
+ */
+ BUG_ON((char *)nl - (char *)orig_nl != needed);
out:
up_write(&_hash_lock);
@@ -1621,7 +1628,8 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
* which has a variable size, is not used by the function processing
* the ioctl.
*/
-#define IOCTL_FLAGS_NO_PARAMS 1
+#define IOCTL_FLAGS_NO_PARAMS 1
+#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2
/*-----------------------------------------------------------------
* Implementation of open/close/ioctl on the special char
@@ -1635,12 +1643,12 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
ioctl_fn fn;
} _ioctls[] = {
{DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
- {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all},
+ {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all},
{DM_LIST_DEVICES_CMD, 0, list_devices},
- {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create},
- {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove},
- {DM_DEV_RENAME_CMD, 0, dev_rename},
+ {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create},
+ {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove},
+ {DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename},
{DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
{DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
{DM_DEV_WAIT_CMD, 0, dev_wait},
@@ -1869,6 +1877,9 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
+ if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT)
+ dm_issue_global_event();
+
/*
* Copy the results back to userland.
*/
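The list_devices() sizing above reserves align_val()-rounded space for each name record plus its trailing event counter, and the final BUG_ON() checks that the writer consumed exactly what was reserved. A worked example of the rounding, assuming the usual struct dm_name_list layout from the uapi header (name[] starting at offset 12):

#define ALIGN_MASK 7
static inline size_t align_val(size_t val)
{
        return (val + ALIGN_MASK) & ~(size_t)ALIGN_MASK;
}

/*
 * For a hypothetical device named "vg0-home" (8 characters):
 *
 *   name record    align_val(12 + 8 + 1) = align_val(21) = 24 bytes
 *   event counter  align_val(sizeof(uint32_t)) = align_val(4) = 8 bytes
 *
 * so 32 bytes are reserved for the entry, and the write loop must land
 * exactly 32 bytes further along for the BUG_ON() to pass.
 */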
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index cf2c67e35eaf..eb45cc3df31d 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -107,7 +107,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t)
try_again:
spin_lock_irq(&throttle_spinlock);
- throttle = ACCESS_ONCE(t->throttle);
+ throttle = READ_ONCE(t->throttle);
if (likely(throttle >= 100))
goto skip_limit;
@@ -157,7 +157,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t)
t->num_io_jobs--;
- if (likely(ACCESS_ONCE(t->throttle) >= 100))
+ if (likely(READ_ONCE(t->throttle) >= 100))
goto skip_limit;
if (!t->num_io_jobs) {
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 8b80a9ce9ea9..189badbeddaf 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -10,9 +10,11 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
+#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
+#include <linux/uio.h>
#define DM_MSG_PREFIX "log-writes"
@@ -246,27 +248,108 @@ error:
return -1;
}
+static int write_inline_data(struct log_writes_c *lc, void *entry,
+ size_t entrylen, void *data, size_t datalen,
+ sector_t sector)
+{
+ int num_pages, bio_pages, pg_datalen, pg_sectorlen, i;
+ struct page *page;
+ struct bio *bio;
+ size_t ret;
+ void *ptr;
+
+ while (datalen) {
+ num_pages = ALIGN(datalen, PAGE_SIZE) >> PAGE_SHIFT;
+ bio_pages = min(num_pages, BIO_MAX_PAGES);
+
+ atomic_inc(&lc->io_blocks);
+
+ bio = bio_alloc(GFP_KERNEL, bio_pages);
+ if (!bio) {
+ DMERR("Couldn't alloc inline data bio");
+ goto error;
+ }
+
+ bio->bi_iter.bi_size = 0;
+ bio->bi_iter.bi_sector = sector;
+ bio_set_dev(bio, lc->logdev->bdev);
+ bio->bi_end_io = log_end_io;
+ bio->bi_private = lc;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+ for (i = 0; i < bio_pages; i++) {
+ pg_datalen = min_t(int, datalen, PAGE_SIZE);
+ pg_sectorlen = ALIGN(pg_datalen, lc->sectorsize);
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ DMERR("Couldn't alloc inline data page");
+ goto error_bio;
+ }
+
+ ptr = kmap_atomic(page);
+ memcpy(ptr, data, pg_datalen);
+ if (pg_sectorlen > pg_datalen)
+ memset(ptr + pg_datalen, 0, pg_sectorlen - pg_datalen);
+ kunmap_atomic(ptr);
+
+ ret = bio_add_page(bio, page, pg_sectorlen, 0);
+ if (ret != pg_sectorlen) {
+ DMERR("Couldn't add page of inline data");
+ __free_page(page);
+ goto error_bio;
+ }
+
+ datalen -= pg_datalen;
+ data += pg_datalen;
+ }
+ submit_bio(bio);
+
+ sector += bio_pages * PAGE_SECTORS;
+ }
+ return 0;
+error_bio:
+ bio_free_pages(bio);
+ bio_put(bio);
+error:
+ put_io_block(lc);
+ return -1;
+}
+
static int log_one_block(struct log_writes_c *lc,
struct pending_block *block, sector_t sector)
{
struct bio *bio;
struct log_write_entry entry;
- size_t ret;
+ size_t metadatalen, ret;
int i;
entry.sector = cpu_to_le64(block->sector);
entry.nr_sectors = cpu_to_le64(block->nr_sectors);
entry.flags = cpu_to_le64(block->flags);
entry.data_len = cpu_to_le64(block->datalen);
+
+ metadatalen = (block->flags & LOG_MARK_FLAG) ? block->datalen : 0;
if (write_metadata(lc, &entry, sizeof(entry), block->data,
- block->datalen, sector)) {
+ metadatalen, sector)) {
free_pending_block(lc, block);
return -1;
}
+ sector += dev_to_bio_sectors(lc, 1);
+
+ if (block->datalen && metadatalen == 0) {
+ if (write_inline_data(lc, &entry, sizeof(entry), block->data,
+ block->datalen, sector)) {
+ free_pending_block(lc, block);
+ return -1;
+ }
+ /* we don't support both inline data & bio data */
+ goto out;
+ }
+
if (!block->vec_cnt)
goto out;
- sector += dev_to_bio_sectors(lc, 1);
atomic_inc(&lc->io_blocks);
bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
@@ -527,6 +610,51 @@ static int log_mark(struct log_writes_c *lc, char *data)
return 0;
}
+static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes,
+ struct iov_iter *i)
+{
+ struct pending_block *block;
+
+ if (!bytes)
+ return 0;
+
+ block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
+ if (!block) {
+ DMERR("Error allocating dax pending block");
+ return -ENOMEM;
+ }
+
+ block->data = kzalloc(bytes, GFP_KERNEL);
+ if (!block->data) {
+ DMERR("Error allocating dax data space");
+ kfree(block);
+ return -ENOMEM;
+ }
+
+ /* write data provided via the iterator */
+ if (!copy_from_iter(block->data, bytes, i)) {
+ DMERR("Error copying dax data");
+ kfree(block->data);
+ kfree(block);
+ return -EIO;
+ }
+
+ /* rewind the iterator so that the block driver can use it */
+ iov_iter_revert(i, bytes);
+
+ block->datalen = bytes;
+ block->sector = bio_to_dev_sectors(lc, sector);
+ block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift;
+
+ atomic_inc(&lc->pending_blocks);
+ spin_lock_irq(&lc->blocks_lock);
+ list_add_tail(&block->list, &lc->unflushed_blocks);
+ spin_unlock_irq(&lc->blocks_lock);
+ wake_up_process(lc->log_kthread);
+
+ return 0;
+}
+
static void log_writes_dtr(struct dm_target *ti)
{
struct log_writes_c *lc = ti->private;
@@ -792,9 +920,46 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit
limits->io_min = limits->physical_block_size;
}
+static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
+{
+ struct log_writes_c *lc = ti->private;
+ sector_t sector = pgoff * PAGE_SECTORS;
+ int ret;
+
+ ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages * PAGE_SIZE, &pgoff);
+ if (ret)
+ return ret;
+ return dax_direct_access(lc->dev->dax_dev, pgoff, nr_pages, kaddr, pfn);
+}
+
+static size_t log_writes_dax_copy_from_iter(struct dm_target *ti,
+ pgoff_t pgoff, void *addr, size_t bytes,
+ struct iov_iter *i)
+{
+ struct log_writes_c *lc = ti->private;
+ sector_t sector = pgoff * PAGE_SECTORS;
+ int err;
+
+ if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
+ return 0;
+
+ /* Don't bother doing anything if logging has been disabled */
+ if (!lc->logging_enabled)
+ goto dax_copy;
+
+ err = log_dax(lc, sector, bytes, i);
+ if (err) {
+ DMWARN("Error %d logging DAX write", err);
+ return 0;
+ }
+dax_copy:
+ return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
+}
+
static struct target_type log_writes_target = {
.name = "log-writes",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = log_writes_ctr,
.dtr = log_writes_dtr,
@@ -805,6 +970,8 @@ static struct target_type log_writes_target = {
.message = log_writes_message,
.iterate_devices = log_writes_iterate_devices,
.io_hints = log_writes_io_hints,
+ .direct_access = log_writes_dax_direct_access,
+ .dax_copy_from_iter = log_writes_dax_copy_from_iter,
};
static int __init dm_log_writes_init(void)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 11f273d2f018..3f88c9d32f7e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m,
pgpath = path_to_pgpath(path);
- if (unlikely(lockless_dereference(m->current_pg) != pg)) {
+ if (unlikely(READ_ONCE(m->current_pg) != pg)) {
/* Only update current_pgpath if pg changed */
spin_lock_irqsave(&m->lock, flags);
m->current_pgpath = pgpath;
@@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
}
/* Were we instructed to switch PG? */
- if (lockless_dereference(m->next_pg)) {
+ if (READ_ONCE(m->next_pg)) {
spin_lock_irqsave(&m->lock, flags);
pg = m->next_pg;
if (!pg) {
@@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
/* Don't change PG until it has no remaining paths */
check_current_pg:
- pg = lockless_dereference(m->current_pg);
+ pg = READ_ONCE(m->current_pg);
if (pg) {
pgpath = choose_path_in_pg(m, pg, nr_bytes);
if (!IS_ERR_OR_NULL(pgpath))
@@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
struct request *clone;
/* Do we need to select a new pgpath? */
- pgpath = lockless_dereference(m->current_pgpath);
+ pgpath = READ_ONCE(m->current_pgpath);
if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
pgpath = choose_pgpath(m, nr_bytes);
@@ -535,7 +535,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
bool queue_io;
/* Do we need to select a new pgpath? */
- pgpath = lockless_dereference(m->current_pgpath);
+ pgpath = READ_ONCE(m->current_pgpath);
queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
if (!pgpath || !queue_io)
pgpath = choose_pgpath(m, nr_bytes);
@@ -1804,7 +1804,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
struct pgpath *current_pgpath;
int r;
- current_pgpath = lockless_dereference(m->current_pgpath);
+ current_pgpath = READ_ONCE(m->current_pgpath);
if (!current_pgpath)
current_pgpath = choose_pgpath(m, 0);
@@ -1826,7 +1826,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
}
if (r == -ENOTCONN) {
- if (!lockless_dereference(m->current_pg)) {
+ if (!READ_ONCE(m->current_pg)) {
/* Path status changed, redo selection */
(void) choose_pgpath(m, 0);
}
@@ -1895,9 +1895,9 @@ static int multipath_busy(struct dm_target *ti)
return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
/* Guess which priority_group will be used at next mapping time */
- pg = lockless_dereference(m->current_pg);
- next_pg = lockless_dereference(m->next_pg);
- if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
+ pg = READ_ONCE(m->current_pg);
+ next_pg = READ_ONCE(m->next_pg);
+ if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
pg = next_pg;
if (!pg) {
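All of the dm-mpath hunks above are the same mechanical substitution: lockless_dereference() was retired once READ_ONCE() picked up the dependency-ordering guarantee it used to provide, so lockless readers become plain marked loads. A small standalone sketch of the pattern (names here are invented, not multipath code); writers still publish under a lock, exactly as m->lock is used above:

#include <linux/compiler.h>
#include <linux/spinlock.h>

struct pg_state {
        int value;
};

static DEFINE_SPINLOCK(state_lock);
static struct pg_state *current_state;

/* writer side: updates stay under the spinlock */
static void set_state(struct pg_state *new_state)
{
        spin_lock(&state_lock);
        current_state = new_state;
        spin_unlock(&state_lock);
}

/* reader side: one marked load, no lock; READ_ONCE() is sufficient for
 * the dependent dereference on every supported architecture */
static int peek_value(void)
{
        struct pg_state *s = READ_ONCE(current_state);

        return s ? s->value : -1;
}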
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8b1d93114f40..366c625b9591 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2143,13 +2143,6 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
struct dm_raid_superblock *refsb;
uint64_t events_sb, events_refsb;
- rdev->sb_start = 0;
- rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
- if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
- DMERR("superblock size of a logical block is no longer valid");
- return -EINVAL;
- }
-
r = read_disk_sb(rdev, rdev->sb_size, false);
if (r)
return r;
@@ -2494,6 +2487,17 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
if (test_bit(Journal, &rdev->flags))
continue;
+ if (!rdev->meta_bdev)
+ continue;
+
+ /* Set superblock offset/size for metadata device. */
+ rdev->sb_start = 0;
+ rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
+ if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) {
+ DMERR("superblock size of a logical block is no longer valid");
+ return -EINVAL;
+ }
+
/*
* Skipping super_load due to CTR_FLAG_SYNC will cause
* the array to undergo initialization again as
@@ -2506,9 +2510,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
continue;
- if (!rdev->meta_bdev)
- continue;
-
r = super_load(rdev, freshest);
switch (r) {
@@ -3297,11 +3298,10 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev,
static sector_t rs_get_progress(struct raid_set *rs,
sector_t resync_max_sectors, bool *array_in_sync)
{
- sector_t r, recovery_cp, curr_resync_completed;
+ sector_t r, curr_resync_completed;
struct mddev *mddev = &rs->md;
curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
- recovery_cp = mddev->recovery_cp;
*array_in_sync = false;
if (rs_is_raid0(rs)) {
@@ -3330,9 +3330,11 @@ static sector_t rs_get_progress(struct raid_set *rs,
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
r = curr_resync_completed;
else
- r = recovery_cp;
+ r = mddev->recovery_cp;
- if (r == MaxSector) {
+ if ((r == MaxSector) ||
+ (test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
+ (mddev->curr_resync_completed == resync_max_sectors))) {
/*
* Sync complete.
*/
@@ -3898,7 +3900,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 12, 1},
+ .version = {1, 13, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index eadfcfd106ff..9d32f25489c2 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -56,7 +56,7 @@ static unsigned dm_get_blk_mq_queue_depth(void)
int dm_request_based(struct mapped_device *md)
{
- return blk_queue_stackable(md->queue);
+ return queue_is_rq_based(md->queue);
}
static void dm_old_start_queue(struct request_queue *q)
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 6028d8247f58..29bc51084c82 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
@@ -431,7 +432,7 @@ do_sync_free:
synchronize_rcu_expedited();
dm_stat_free(&s->rcu_head);
} else {
- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+ WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
call_rcu(&s->rcu_head, dm_stat_free);
}
return 0;
@@ -639,12 +640,12 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
*/
last = raw_cpu_ptr(stats->last);
stats_aux->merged =
- (bi_sector == (ACCESS_ONCE(last->last_sector) &&
+ (bi_sector == (READ_ONCE(last->last_sector) &&
((bi_rw == WRITE) ==
- (ACCESS_ONCE(last->last_rw) == WRITE))
+ (READ_ONCE(last->last_rw) == WRITE))
));
- ACCESS_ONCE(last->last_sector) = end_sector;
- ACCESS_ONCE(last->last_rw) = bi_rw;
+ WRITE_ONCE(last->last_sector, end_sector);
+ WRITE_ONCE(last->last_rw, bi_rw);
}
rcu_read_lock();
@@ -693,22 +694,22 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
for_each_possible_cpu(cpu) {
p = &s->stat_percpu[cpu][x];
- shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
- shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
- shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
- shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
- shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
- shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
- shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
- shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
- shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
- shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
- shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
- shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
+ shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
+ shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
+ shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
+ shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
+ shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
+ shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
+ shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
+ shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
+ shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
+ shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
+ shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
+ shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
if (s->n_histogram_entries) {
unsigned i;
for (i = 0; i < s->n_histogram_entries + 1; i++)
- shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]);
+ shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
}
}
}
diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
index f1c0956e3843..2ddfae678f32 100644
--- a/drivers/md/dm-stats.h
+++ b/drivers/md/dm-stats.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DM_STATS_H
#define DM_STATS_H
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 4c8de1ff78ca..8d0ba879777e 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -144,7 +144,7 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long
switch_get_position(sctx, region_nr, &region_index, &bit);
- return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) &
+ return (READ_ONCE(sctx->region_table[region_index]) >> bit) &
((1 << sctx->region_table_entry_bits) - 1);
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ef7b8f201f73..433bd3f3014c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -451,15 +451,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
return r;
}
- atomic_set(&dd->count, 0);
+ refcount_set(&dd->count, 1);
list_add(&dd->list, &t->devices);
} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
r = upgrade_mode(dd, mode, t->md);
if (r)
return r;
+ refcount_inc(&dd->count);
}
- atomic_inc(&dd->count);
*result = dd->dm_dev;
return 0;
@@ -515,7 +515,7 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
dm_device_name(ti->table->md), d->name);
return;
}
- if (atomic_dec_and_test(&dd->count)) {
+ if (refcount_dec_and_test(&dd->count)) {
dm_put_table_device(ti->table->md, d);
list_del(&dd->list);
kfree(dd);
@@ -1000,7 +1000,7 @@ verify_rq_based:
list_for_each_entry(dd, devices, list) {
struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
- if (!blk_queue_stackable(q)) {
+ if (!queue_is_rq_based(q)) {
DMERR("table load rejected: including"
" non-request-stackable devices");
return -EINVAL;
@@ -1847,19 +1847,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
*/
if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
-
- /*
- * QUEUE_FLAG_STACKABLE must be set after all queue settings are
- * visible to other CPUs because, once the flag is set, incoming bios
- * are processed by request-based dm, which refers to the queue
- * settings.
- * Until the flag set, bios are passed to bio-based dm and queued to
- * md->deferred where queue settings are not needed yet.
- * Those bios are passed to request-based dm at the resume time.
- */
- smp_mb();
- if (dm_table_request_based(t))
- queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1e25705209c2..89e5dff9b4cf 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2431,7 +2431,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
struct pool_c *pt = pool->ti->private;
bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
enum pool_mode old_mode = get_pool_mode(pool);
- unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
+ unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ;
/*
* Never allow the pool to transition to PM_WRITE mode if user
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index bda3caca23ca..aedb8222836b 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -92,74 +92,33 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
return block >> (level * v->hash_per_block_bits);
}
-/*
- * Callback function for asynchronous crypto API completion notification
- */
-static void verity_op_done(struct crypto_async_request *base, int err)
-{
- struct verity_result *res = (struct verity_result *)base->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
-/*
- * Wait for async crypto API callback
- */
-static inline int verity_complete_op(struct verity_result *res, int ret)
-{
- switch (ret) {
- case 0:
- break;
-
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(&res->completion);
- if (!ret)
- ret = res->err;
- reinit_completion(&res->completion);
- break;
-
- default:
- DMERR("verity_wait_hash: crypto op submission failed: %d", ret);
- }
-
- if (unlikely(ret < 0))
- DMERR("verity_wait_hash: crypto op failed: %d", ret);
-
- return ret;
-}
-
static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
const u8 *data, size_t len,
- struct verity_result *res)
+ struct crypto_wait *wait)
{
struct scatterlist sg;
sg_init_one(&sg, data, len);
ahash_request_set_crypt(req, &sg, NULL, len);
- return verity_complete_op(res, crypto_ahash_update(req));
+ return crypto_wait_req(crypto_ahash_update(req), wait);
}
/*
* Wrapper for crypto_ahash_init, which handles verity salting.
*/
static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
- struct verity_result *res)
+ struct crypto_wait *wait)
{
int r;
ahash_request_set_tfm(req, v->tfm);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
- verity_op_done, (void *)res);
- init_completion(&res->completion);
+ crypto_req_done, (void *)wait);
+ crypto_init_wait(wait);
- r = verity_complete_op(res, crypto_ahash_init(req));
+ r = crypto_wait_req(crypto_ahash_init(req), wait);
if (unlikely(r < 0)) {
DMERR("crypto_ahash_init failed: %d", r);
@@ -167,18 +126,18 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
}
if (likely(v->salt_size && (v->version >= 1)))
- r = verity_hash_update(v, req, v->salt, v->salt_size, res);
+ r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
return r;
}
static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
- u8 *digest, struct verity_result *res)
+ u8 *digest, struct crypto_wait *wait)
{
int r;
if (unlikely(v->salt_size && (!v->version))) {
- r = verity_hash_update(v, req, v->salt, v->salt_size, res);
+ r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
if (r < 0) {
DMERR("verity_hash_final failed updating salt: %d", r);
@@ -187,7 +146,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
}
ahash_request_set_crypt(req, NULL, digest, 0);
- r = verity_complete_op(res, crypto_ahash_final(req));
+ r = crypto_wait_req(crypto_ahash_final(req), wait);
out:
return r;
}
@@ -196,17 +155,17 @@ int verity_hash(struct dm_verity *v, struct ahash_request *req,
const u8 *data, size_t len, u8 *digest)
{
int r;
- struct verity_result res;
+ struct crypto_wait wait;
- r = verity_hash_init(v, req, &res);
+ r = verity_hash_init(v, req, &wait);
if (unlikely(r < 0))
goto out;
- r = verity_hash_update(v, req, data, len, &res);
+ r = verity_hash_update(v, req, data, len, &wait);
if (unlikely(r < 0))
goto out;
- r = verity_hash_final(v, req, digest, &res);
+ r = verity_hash_final(v, req, digest, &wait);
out:
return r;
@@ -389,7 +348,7 @@ out:
* Calculates the digest for the given bio
*/
int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io,
- struct bvec_iter *iter, struct verity_result *res)
+ struct bvec_iter *iter, struct crypto_wait *wait)
{
unsigned int todo = 1 << v->data_dev_block_bits;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
@@ -414,7 +373,7 @@ int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io,
*/
sg_set_page(&sg, bv.bv_page, len, bv.bv_offset);
ahash_request_set_crypt(req, &sg, NULL, len);
- r = verity_complete_op(res, crypto_ahash_update(req));
+ r = crypto_wait_req(crypto_ahash_update(req), wait);
if (unlikely(r < 0)) {
DMERR("verity_for_io_block crypto op failed: %d", r);
@@ -482,7 +441,7 @@ static int verity_verify_io(struct dm_verity_io *io)
struct dm_verity *v = io->v;
struct bvec_iter start;
unsigned b;
- struct verity_result res;
+ struct crypto_wait wait;
for (b = 0; b < io->n_blocks; b++) {
int r;
@@ -507,17 +466,17 @@ static int verity_verify_io(struct dm_verity_io *io)
continue;
}
- r = verity_hash_init(v, req, &res);
+ r = verity_hash_init(v, req, &wait);
if (unlikely(r < 0))
return r;
start = io->iter;
- r = verity_for_io_block(v, io, &io->iter, &res);
+ r = verity_for_io_block(v, io, &io->iter, &wait);
if (unlikely(r < 0))
return r;
r = verity_hash_final(v, req, verity_io_real_digest(v, io),
- &res);
+ &wait);
if (unlikely(r < 0))
return r;
@@ -589,7 +548,7 @@ static void verity_prefetch_io(struct work_struct *work)
verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
if (!i) {
- unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
+ unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster);
cluster >>= v->data_dev_block_bits;
if (unlikely(!cluster))
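The verity change above drops the driver's private verity_result/verity_op_done/verity_complete_op machinery in favour of the generic crypto_wait helpers. A minimal sketch of that API for a one-shot hash, under the assumption that the caller has already allocated the ahash tfm; error handling is trimmed and this is illustrative rather than the verity code itself:

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int hash_one_buffer(struct crypto_ahash *tfm, const u8 *data,
                           size_t len, u8 *digest)
{
        struct ahash_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int r;

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        /* route the async completion into the wait object */
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                   CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);
        sg_init_one(&sg, data, len);
        ahash_request_set_crypt(req, &sg, digest, len);

        /* sleeps on -EINPROGRESS/-EBUSY and returns the final status */
        r = crypto_wait_req(crypto_ahash_digest(req), &wait);

        ahash_request_free(req);
        return r;
}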
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index a59e0ada6fd3..b675bc015512 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -90,11 +90,6 @@ struct dm_verity_io {
*/
};
-struct verity_result {
- struct completion completion;
- int err;
-};
-
static inline struct ahash_request *verity_io_hash_req(struct dm_verity *v,
struct dm_verity_io *io)
{
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index b87c1741da4b..6d7bda6f8190 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -660,6 +660,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
struct dmz_target *dmz = ti->private;
struct request_queue *q;
struct dmz_dev *dev;
+ sector_t aligned_capacity;
int ret;
/* Get the target device */
@@ -685,15 +686,17 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
goto err;
}
+ q = bdev_get_queue(dev->bdev);
dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
- if (ti->begin || (ti->len != dev->capacity)) {
+ aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
+ if (ti->begin ||
+ ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
ti->error = "Partial mapping not supported";
ret = -EINVAL;
goto err;
}
- q = bdev_get_queue(dev->bdev);
- dev->zone_nr_sectors = q->limits.chunk_sectors;
+ dev->zone_nr_sectors = blk_queue_zone_sectors(q);
dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
@@ -929,8 +932,10 @@ static int dmz_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct dmz_target *dmz = ti->private;
+ struct dmz_dev *dev = dmz->dev;
+ sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);
- return fn(ti, dmz->ddev, 0, dmz->dev->capacity, data);
+ return fn(ti, dmz->ddev, 0, capacity, data);
}
static struct target_type dmz_type = {
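dm-zoned now tolerates a backing device whose capacity is not a whole number of zones by rounding the usable capacity down to a zone boundary; zone sizes are powers of two for zoned block devices, so the rounding is a single mask. An illustrative calculation with made-up numbers:

#include <linux/blkdev.h>

/* round a capacity (in 512-byte sectors) down to a zone boundary */
static sector_t usable_capacity(sector_t capacity, sector_t zone_nr_sectors)
{
        return capacity & ~(zone_nr_sectors - 1);
}

/*
 * With 256 MiB zones (524288 sectors) and a raw capacity of 19541868
 * sectors, usable_capacity() returns 19398656 sectors = 37 full zones;
 * the 143212-sector remainder is simply not exposed.
 */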
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6e54145969c5..de17b7193299 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
+#include <linux/refcount.h>
#define DM_MSG_PREFIX "core"
@@ -52,6 +53,12 @@ static struct workqueue_struct *deferred_remove_workqueue;
atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
+void dm_issue_global_event(void)
+{
+ atomic_inc(&dm_global_event_nr);
+ wake_up(&dm_global_eventq);
+}
+
/*
* One of these is allocated per bio.
*/
@@ -92,7 +99,7 @@ struct dm_md_mempools {
struct table_device {
struct list_head list;
- atomic_t count;
+ refcount_t count;
struct dm_dev dm_dev;
};
@@ -108,7 +115,7 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
- int param = ACCESS_ONCE(*module_param);
+ int param = READ_ONCE(*module_param);
int modified_param = 0;
bool modified = true;
@@ -130,7 +137,7 @@ static int __dm_get_module_param_int(int *module_param, int min, int max)
unsigned __dm_get_module_param(unsigned *module_param,
unsigned def, unsigned max)
{
- unsigned param = ACCESS_ONCE(*module_param);
+ unsigned param = READ_ONCE(*module_param);
unsigned modified_param = 0;
if (!param)
@@ -679,10 +686,11 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
format_dev_t(td->dm_dev.name, dev);
- atomic_set(&td->count, 0);
+ refcount_set(&td->count, 1);
list_add(&td->list, &md->table_devices);
+ } else {
+ refcount_inc(&td->count);
}
- atomic_inc(&td->count);
mutex_unlock(&md->table_devices_lock);
*result = &td->dm_dev;
@@ -695,7 +703,7 @@ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
struct table_device *td = container_of(d, struct table_device, dm_dev);
mutex_lock(&md->table_devices_lock);
- if (atomic_dec_and_test(&td->count)) {
+ if (refcount_dec_and_test(&td->count)) {
close_table_device(td, md);
list_del(&td->list);
kfree(td);
@@ -712,7 +720,7 @@ static void free_table_devices(struct list_head *devices)
struct table_device *td = list_entry(tmp, struct table_device, list);
DMWARN("dm_destroy: %s still exists with %d references",
- td->dm_dev.name, atomic_read(&td->count));
+ td->dm_dev.name, refcount_read(&td->count));
kfree(td);
}
}
@@ -1613,17 +1621,6 @@ static void dm_wq_work(struct work_struct *work);
void dm_init_md_queue(struct mapped_device *md)
{
/*
- * Request-based dm devices cannot be stacked on top of bio-based dm
- * devices. The type of this dm device may not have been decided yet.
- * The type is decided at the first table loading time.
- * To prevent problematic device stacking, clear the queue flag
- * for request stacking support until then.
- *
- * This queue is new, so no concurrency on the queue_flags.
- */
- queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
-
- /*
* Initialize data that will only be used by a non-blk-mq DM queue
* - must do so here (in alloc_dev callchain) before queue is used
*/
@@ -1689,7 +1686,7 @@ static struct mapped_device *alloc_dev(int minor)
struct mapped_device *md;
void *old_md;
- md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
+ md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
if (!md) {
DMWARN("unable to allocate device, out of memory.");
return NULL;
@@ -1789,7 +1786,7 @@ bad_io_barrier:
bad_minor:
module_put(THIS_MODULE);
bad_module_get:
- kfree(md);
+ kvfree(md);
return NULL;
}
@@ -1808,7 +1805,7 @@ static void free_dev(struct mapped_device *md)
free_minor(minor);
module_put(THIS_MODULE);
- kfree(md);
+ kvfree(md);
}
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
@@ -1865,9 +1862,8 @@ static void event_callback(void *context)
dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
atomic_inc(&md->event_nr);
- atomic_inc(&dm_global_event_nr);
wake_up(&md->eventq);
- wake_up(&dm_global_eventq);
+ dm_issue_global_event();
}
/*
@@ -2067,17 +2063,12 @@ struct mapped_device *dm_get_md(dev_t dev)
spin_lock(&_minor_lock);
md = idr_find(&_minor_idr, minor);
- if (md) {
- if ((md == MINOR_ALLOCED ||
- (MINOR(disk_devt(dm_disk(md))) != minor) ||
- dm_deleting_md(md) ||
- test_bit(DMF_FREEING, &md->flags))) {
- md = NULL;
- goto out;
- }
- dm_get(md);
+ if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
+ test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
+ md = NULL;
+ goto out;
}
-
+ dm_get(md);
out:
spin_unlock(&_minor_lock);
@@ -2283,6 +2274,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
}
map = __bind(md, table, &limits);
+ dm_issue_global_event();
out:
mutex_unlock(&md->suspend_lock);
@@ -2703,11 +2695,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
- if (test_bit(DMF_FREEING, &md->flags) ||
- dm_deleting_md(md))
- return NULL;
-
+ spin_lock(&_minor_lock);
+ if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
+ md = NULL;
+ goto out;
+ }
dm_get(md);
+out:
+ spin_unlock(&_minor_lock);
+
return md;
}
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 38c84c0a35d4..36399bb875dd 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -19,6 +19,7 @@
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/kobject.h>
+#include <linux/refcount.h>
#include "dm-stats.h"
@@ -38,7 +39,7 @@
*/
struct dm_dev_internal {
struct list_head list;
- atomic_t count;
+ refcount_t count;
struct dm_dev *dm_dev;
};
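The dm.c and dm.h hunks above convert the table_device and dm_dev_internal reference counts from atomic_t to refcount_t, which also changes the idiom: the creator starts the count at 1, later users take refcount_inc(), and refcount_dec_and_test() marks the final put. A generic sketch of that lifecycle with illustrative names, not the DM code itself:

#include <linux/refcount.h>
#include <linux/slab.h>

struct tracked {
        refcount_t count;
        /* ... payload ... */
};

static struct tracked *tracked_create(void)
{
        struct tracked *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (t)
                refcount_set(&t->count, 1);     /* creator holds the first reference */
        return t;
}

static void tracked_get(struct tracked *t)
{
        refcount_inc(&t->count);                /* saturates and warns instead of wrapping */
}

static void tracked_put(struct tracked *t)
{
        if (refcount_dec_and_test(&t->count))
                kfree(t);                       /* last reference dropped */
}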
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index bb45c0ccc1bf..239c7bb3929b 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -368,7 +368,7 @@ static int read_page(struct file *file, unsigned long index,
pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
(unsigned long long)index << PAGE_SHIFT);
- bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
+ bh = alloc_page_buffers(page, 1<<inode->i_blkbits, false);
if (!bh) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
index d15721ac07a6..5df35ca90f58 100644
--- a/drivers/md/md-bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* bitmap.h: Copyright (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
*
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index 274016177983..c0240708f443 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MD_CLUSTER_H
diff --git a/drivers/md/md-linear.h b/drivers/md/md-linear.h
index 8d392e6098b3..8381d651d4ed 100644
--- a/drivers/md/md-linear.h
+++ b/drivers/md/md-linear.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINEAR_H
#define _LINEAR_H
diff --git a/drivers/md/md-multipath.h b/drivers/md/md-multipath.h
index 717c60f62898..0adb941f485a 100644
--- a/drivers/md/md-multipath.h
+++ b/drivers/md/md-multipath.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MULTIPATH_H
#define _MULTIPATH_H
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e014d39159d7..09c3af3dcdca 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2681,7 +2681,7 @@ state_show(struct md_rdev *rdev, char *page)
{
char *sep = ",";
size_t len = 0;
- unsigned long flags = ACCESS_ONCE(rdev->flags);
+ unsigned long flags = READ_ONCE(rdev->flags);
if (test_bit(Faulty, &flags) ||
(!test_bit(ExternalBbl, &flags) &&
diff --git a/drivers/md/persistent-data/Makefile b/drivers/md/persistent-data/Makefile
index ff528792c358..66be7c66479a 100644
--- a/drivers/md/persistent-data/Makefile
+++ b/drivers/md/persistent-data/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DM_PERSISTENT_DATA) += dm-persistent-data.o
dm-persistent-data-objs := \
dm-array.o \
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 4aed69d9dd17..aec449243966 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>
+#include <linux/kernel.h>
#define DM_MSG_PREFIX "space map metadata"
@@ -111,7 +112,7 @@ static bool brb_empty(struct bop_ring_buffer *brb)
static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
{
unsigned r = old + 1;
- return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
+ return r >= ARRAY_SIZE(brb->bops) ? 0 : r;
}
static int brb_push(struct bop_ring_buffer *brb,
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index 7127a623f5da..540e65d92642 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID0_H
#define _RAID0_H
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 9f2670b45f31..400001b815db 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index c8894ef1e9d2..c7294e7557e0 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID1_H
#define _RAID1_H
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 2bef4e8789c8..db2ac22ac1b4 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID10_H
#define _RAID10_H
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index c3596a27a5a8..284578b0a349 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID5_LOG_H
#define _RAID5_LOG_H
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 1649e82faae2..31dc25e2871a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6070,7 +6070,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
*/
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
+ struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
if (rdev == NULL || test_bit(Faulty, &rdev->flags))
still_degraded = 1;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index f6536399677a..2e6123825095 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID5_H
#define _RAID5_H