author    | Linus Torvalds <torvalds@linux-foundation.org> | 2023-04-26 23:05:21 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-04-26 23:05:21 +0300
commit    | 48dc810012a6b4f4ba94073d6b7edb4f76edeb72 (patch)
tree      | df9bee18cc806bb23b2bc279369c1a8deec12df1 /drivers
parent    | 9dd6956b38923dc1b7b349ca1eee3c0bb1f0163a (diff)
parent    | 38d11da522aacaa05898c734a1cec86f1e611129 (diff)
download  | linux-48dc810012a6b4f4ba94073d6b7edb4f76edeb72.tar.xz
Merge tag 'for-6.4/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:
- Split dm-bufio's rw_semaphore and rbtree. This improves dm-bufio's
  locking to allow increased concurrent IO, particularly for read
  access to buffers already in dm-bufio's cache.
- Also split dm-bio-prison-v1's spinlock and rbtree, with the
  comparable aim of improving concurrent IO (for the DM thinp target).
- For both dm-bufio and dm-bio-prison-v1, the scaling of the number of
  locks and rbtrees used is managed by dm_num_hash_locks(), and the
  hash function used by both is dm_hash_locks_index() (see the sketch
  after this list).
- Allow DM targets to require DISCARD, WRITE_ZEROES and SECURE_ERASE to
  be split at the target-specified boundary (in terms of
  max_discard_sectors, max_write_zeroes_sectors and
  max_secure_erase_sectors respectively).
- DM verity error handling fix for check_at_most_once on FEC.
- Update DM verity target to emit audit events on verification failure
and more.
- DM core ->io_hints improvements needed to support the new discard
  capability added to the DM "zero" and "error" targets.
- Fix missing kmem_cache_destroy() call in the initialization error
  path of both the DM integrity and DM clone targets.
- A couple of fixes for DM flakey; also add an "error_reads" feature.
- Fix DM core's resume to not lock the FS when the DM map is NULL;
  otherwise an initial table load can race with an FS mount that takes
  the superblock's ->s_umount rw_semaphore.
- Various small improvements to both DM core and DM targets.
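The two helpers named above are used throughout the diff below but are not quoted in this pull message, so here is a minimal sketch of the hash-based lock striping they implement, written against the usage visible in the diff. The stripe cap and the multiplicative constant are illustrative assumptions, not quotes of drivers/md/dm.h:

/*
 * Sketch of hash-based lock striping in the style of
 * dm_num_hash_locks()/dm_hash_locks_index(). All concrete values
 * here are assumptions for illustration.
 */
#define SKETCH_MAX_LOCKS 64			/* assumed cap, power of 2 */

static inline unsigned int sketch_num_hash_locks(unsigned int online_cpus)
{
	unsigned int n = 1;

	/* scale the stripe count with CPUs, capped at a power of 2 */
	while (n < online_cpus && n < SKETCH_MAX_LOCKS)
		n <<= 1;
	return n;
}

static inline unsigned int sketch_hash_locks_index(sector_t block,
						   unsigned int num_locks)
{
	/* multiplicative hash; requires num_locks to be a power of 2 */
	u64 h = (u64)block * 2654435761ULL;	/* Knuth's constant, assumed */

	return (h >> 32) & (num_locks - 1);
}

Because every caller hashes the same block to the same stripe, one lock (and its rbtree) is taken per operation while different blocks spread across stripes; struct prison_region and struct buffer_tree in the diff below are sized and indexed in exactly this pattern.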
* tag 'for-6.4/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (40 commits)
dm: don't lock fs when the map is NULL in process of resume
dm flakey: add an "error_reads" option
dm flakey: remove trailing space in the table line
dm flakey: fix a crash with invalid table line
dm ioctl: fix nested locking in table_clear() to remove deadlock concern
dm: unexport dm_get_queue_limits()
dm: allow targets to require splitting WRITE_ZEROES and SECURE_ERASE
dm: add helper macro for simple DM target module init and exit
dm raid: remove unused d variable
dm: remove unnecessary (void*) conversions
dm mirror: add DMERR message if alloc_workqueue fails
dm: push error reporting down to dm_register_target()
dm integrity: call kmem_cache_destroy() in dm_integrity_init() error path
dm clone: call kmem_cache_destroy() in dm_clone_init() error path
dm error: add discard support
dm zero: add discard support
dm table: allow targets without devices to set ->io_hints
dm verity: emit audit events on verification failure and more
dm verity: fix error handling for check_at_most_once on FEC
dm: improve hash_locks sizing and hash function
...
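One shortlog entry worth expanding is "dm: add helper macro for simple DM target module init and exit". The macro body is not shown on this page; a plausible reconstruction, following the familiar module_driver()-style pattern of pairing dm_register_target() with dm_unregister_target(), looks like this (an assumption, not a quote of include/linux/device-mapper.h):

/*
 * Hypothetical reconstruction of a simple-target init/exit helper.
 * A target declaring 'static struct target_type zero_target' would
 * replace its module_init()/module_exit() boilerplate with one line.
 */
#define module_dm_sketch(name) \
static int __init dm_##name##_init(void) \
{ \
	return dm_register_target(&(name##_target)); \
} \
module_init(dm_##name##_init) \
static void __exit dm_##name##_exit(void) \
{ \
	dm_unregister_target(&(name##_target)); \
} \
module_exit(dm_##name##_exit)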
Diffstat (limited to 'drivers')
37 files changed, 1708 insertions, 1049 deletions
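The largest part of the diff below is the dm-bufio rewrite, whose new header comment describes replacing the LRU lists with a clock (second-chance) algorithm. As an aid to reading the lru_evict() hunk, here is a freestanding sketch of just that eviction loop, with the tree locking and atomics stripped out; the names and simplifications are mine, not the kernel's:

#include <stdbool.h>
#include <stddef.h>

/* One entry in a circular list, with a reference bit. */
struct clock_entry {
	struct clock_entry *next;
	bool referenced;
};

/*
 * Second-chance eviction: sweep a cursor around the ring. Referenced
 * entries get their bit cleared and are skipped; the first unreferenced
 * entry accepted by the caller's predicate is returned for the caller
 * to unlink. As in lru_evict(), the worst case is two passes: one to
 * clear reference bits, one to learn the predicate rejects everything.
 */
static struct clock_entry *clock_evict(struct clock_entry **cursor, size_t count,
				       bool (*can_evict)(struct clock_entry *))
{
	struct clock_entry *e = *cursor;
	size_t tested = 0;

	if (!e)
		return NULL;

	while (tested < count) {
		if (e->referenced) {
			e->referenced = false;	/* second chance */
		} else {
			tested++;
			if (can_evict(e)) {
				*cursor = e->next;	/* resume sweep here */
				return e;
			}
		}
		e = e->next;
	}
	return NULL;
}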
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
index c4c05d5d8909..92afdca760ae 100644
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -18,10 +18,15 @@
 #define MIN_CELLS 1024
 
-struct dm_bio_prison {
+struct prison_region {
 	spinlock_t lock;
-	struct rb_root cells;
+	struct rb_root cell;
+} ____cacheline_aligned_in_smp;
+
+struct dm_bio_prison {
 	mempool_t cell_pool;
+	unsigned int num_locks;
+	struct prison_region regions[];
 };
 
 static struct kmem_cache *_cell_cache;
@@ -34,13 +39,20 @@ static struct kmem_cache *_cell_cache;
 */
 struct dm_bio_prison *dm_bio_prison_create(void)
 {
-	struct dm_bio_prison *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
 	int ret;
+	unsigned int i, num_locks;
+	struct dm_bio_prison *prison;
 
+	num_locks = dm_num_hash_locks();
+	prison = kzalloc(struct_size(prison, regions, num_locks), GFP_KERNEL);
 	if (!prison)
 		return NULL;
+	prison->num_locks = num_locks;
 
-	spin_lock_init(&prison->lock);
+	for (i = 0; i < prison->num_locks; i++) {
+		spin_lock_init(&prison->regions[i].lock);
+		prison->regions[i].cell = RB_ROOT;
+	}
 
 	ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
 	if (ret) {
@@ -48,8 +60,6 @@ struct dm_bio_prison *dm_bio_prison_create(void)
 		return NULL;
 	}
 
-	prison->cells = RB_ROOT;
-
 	return prison;
 }
 EXPORT_SYMBOL_GPL(dm_bio_prison_create);
@@ -107,14 +117,32 @@ static int cmp_keys(struct dm_cell_key *lhs,
 	return 0;
 }
 
-static int __bio_detain(struct dm_bio_prison *prison,
+static inline unsigned int lock_nr(struct dm_cell_key *key, unsigned int num_locks)
+{
+	return dm_hash_locks_index((key->block_begin >> BIO_PRISON_MAX_RANGE_SHIFT),
+				   num_locks);
+}
+
+bool dm_cell_key_has_valid_range(struct dm_cell_key *key)
+{
+	if (WARN_ON_ONCE(key->block_end - key->block_begin > BIO_PRISON_MAX_RANGE))
+		return false;
+	if (WARN_ON_ONCE((key->block_begin >> BIO_PRISON_MAX_RANGE_SHIFT) !=
+			 (key->block_end - 1) >> BIO_PRISON_MAX_RANGE_SHIFT))
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL(dm_cell_key_has_valid_range);
+
+static int __bio_detain(struct rb_root *root,
			struct dm_cell_key *key,
			struct bio *inmate,
			struct dm_bio_prison_cell *cell_prealloc,
			struct dm_bio_prison_cell **cell_result)
 {
	int r;
-	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;
+	struct rb_node **new = &root->rb_node, *parent = NULL;
 
	while (*new) {
		struct dm_bio_prison_cell *cell =
@@ -139,7 +167,7 @@ static int __bio_detain(struct dm_bio_prison *prison,
	*cell_result = cell_prealloc;
 
	rb_link_node(&cell_prealloc->node, parent, new);
-	rb_insert_color(&cell_prealloc->node, &prison->cells);
+	rb_insert_color(&cell_prealloc->node, root);
 
	return 0;
 }
@@ -151,10 +179,11 @@ static int bio_detain(struct dm_bio_prison *prison,
		      struct dm_bio_prison_cell **cell_result)
 {
	int r;
+	unsigned l = lock_nr(key, prison->num_locks);
 
-	spin_lock_irq(&prison->lock);
-	r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
-	spin_unlock_irq(&prison->lock);
+	spin_lock_irq(&prison->regions[l].lock);
+	r = __bio_detain(&prison->regions[l].cell, key, inmate, cell_prealloc, cell_result);
+	spin_unlock_irq(&prison->regions[l].lock);
 
	return r;
 }
@@ -181,11 +210,11 @@ EXPORT_SYMBOL_GPL(dm_get_cell);
 /*
 * @inmates must have been initialised prior to this call
 */
-static void __cell_release(struct dm_bio_prison *prison,
+static void __cell_release(struct rb_root *root,
			   struct dm_bio_prison_cell *cell,
			   struct bio_list *inmates)
 {
-	rb_erase(&cell->node, &prison->cells);
+	rb_erase(&cell->node, root);
 
	if (inmates) {
		if (cell->holder)
@@ -198,20 +227,22 @@ void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios)
 {
-	spin_lock_irq(&prison->lock);
-	__cell_release(prison, cell, bios);
-	spin_unlock_irq(&prison->lock);
+	unsigned l = lock_nr(&cell->key, prison->num_locks);
+
+	spin_lock_irq(&prison->regions[l].lock);
+	__cell_release(&prison->regions[l].cell, cell, bios);
+	spin_unlock_irq(&prison->regions[l].lock);
 }
 EXPORT_SYMBOL_GPL(dm_cell_release);
 
 /*
 * Sometimes we don't want the holder, just the additional bios.
 */
-static void __cell_release_no_holder(struct dm_bio_prison *prison,
+static void __cell_release_no_holder(struct rb_root *root,
				     struct dm_bio_prison_cell *cell,
				     struct bio_list *inmates)
 {
-	rb_erase(&cell->node, &prison->cells);
+	rb_erase(&cell->node, root);
	bio_list_merge(inmates, &cell->bios);
 }
 
@@ -219,11 +250,12 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates)
 {
+	unsigned l = lock_nr(&cell->key, prison->num_locks);
	unsigned long flags;
 
-	spin_lock_irqsave(&prison->lock, flags);
-	__cell_release_no_holder(prison, cell, inmates);
-	spin_unlock_irqrestore(&prison->lock, flags);
+	spin_lock_irqsave(&prison->regions[l].lock, flags);
+	__cell_release_no_holder(&prison->regions[l].cell, cell, inmates);
+	spin_unlock_irqrestore(&prison->regions[l].lock, flags);
 }
 EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
 
@@ -248,18 +280,19 @@ void dm_cell_visit_release(struct dm_bio_prison *prison,
			   void *context,
			   struct dm_bio_prison_cell *cell)
 {
-	spin_lock_irq(&prison->lock);
+	unsigned l = lock_nr(&cell->key, prison->num_locks);
+
+	spin_lock_irq(&prison->regions[l].lock);
	visit_fn(context, cell);
-	rb_erase(&cell->node, &prison->cells);
-	spin_unlock_irq(&prison->lock);
+	rb_erase(&cell->node, &prison->regions[l].cell);
+	spin_unlock_irq(&prison->regions[l].lock);
 }
 EXPORT_SYMBOL_GPL(dm_cell_visit_release);
 
-static int __promote_or_release(struct dm_bio_prison *prison,
+static int __promote_or_release(struct rb_root *root,
				struct dm_bio_prison_cell *cell)
 {
	if (bio_list_empty(&cell->bios)) {
-		rb_erase(&cell->node, &prison->cells);
+		rb_erase(&cell->node, root);
		return 1;
	}
 
@@ -271,10 +304,11 @@ int dm_cell_promote_or_release(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell)
 {
	int r;
+	unsigned l = lock_nr(&cell->key, prison->num_locks);
 
-	spin_lock_irq(&prison->lock);
-	r = __promote_or_release(prison, cell);
-	spin_unlock_irq(&prison->lock);
+	spin_lock_irq(&prison->regions[l].lock);
+	r = __promote_or_release(&prison->regions[l].cell, cell);
+	spin_unlock_irq(&prison->regions[l].lock);
 
	return r;
 }
diff --git a/drivers/md/dm-bio-prison-v1.h b/drivers/md/dm-bio-prison-v1.h
index dfbf1e94cb75..2a097ed0d85e 100644
--- a/drivers/md/dm-bio-prison-v1.h
+++ b/drivers/md/dm-bio-prison-v1.h
@@ -35,6 +35,16 @@ struct dm_cell_key {
 };
 
 /*
+ * The range of a key (block_end - block_begin) must not
+ * exceed BIO_PRISON_MAX_RANGE. Also the range must not
+ * cross a similarly sized boundary.
+ *
+ * Must be a power of 2.
+ */
+#define BIO_PRISON_MAX_RANGE 1024
+#define BIO_PRISON_MAX_RANGE_SHIFT 10
+
+/*
 * Treat this as opaque, only in header so callers can manage allocation
 * themselves.
 */
@@ -74,6 +84,11 @@ int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_bio_prison_cell **cell_result);
 
 /*
+ * Returns false if key is beyond BIO_PRISON_MAX_RANGE or spans a boundary.
+ */
+bool dm_cell_key_has_valid_range(struct dm_cell_key *key);
+
+/*
 * An atomic op that combines retrieving or creating a cell, and adding a
 * bio to it.
 *
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index cf077f9b30c3..8a448185662c 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -21,6 +21,8 @@
 #include <linux/stacktrace.h>
 #include <linux/jump_label.h>
 
+#include "dm.h"
+
 #define DM_MSG_PREFIX "bufio"
 
 /*
@@ -66,57 +68,241 @@
 #define LIST_DIRTY 1
 #define LIST_SIZE 2
 
+/*--------------------------------------------------------------*/
+
 /*
- * Linking of buffers:
- *	All buffers are linked to buffer_tree with their node field.
- *
- *	Clean buffers that are not being written (B_WRITING not set)
- *	are linked to lru[LIST_CLEAN] with their lru_list field.
- *
- *	Dirty and clean buffers that are being written are linked to
- *	lru[LIST_DIRTY] with their lru_list field. When the write
- *	finishes, the buffer cannot be relinked immediately (because we
- *	are in an interrupt context and relinking requires process
- *	context), so some clean-not-writing buffers can be held on
- *	dirty_lru too. They are later added to lru in the process
- *	context.
+ * Rather than use an LRU list, we use a clock algorithm where entries
+ * are held in a circular list. When an entry is 'hit' a reference bit
+ * is set. The least recently used entry is approximated by running a
+ * cursor around the list selecting unreferenced entries. Referenced
+ * entries have their reference bit cleared as the cursor passes them.
 */
-struct dm_bufio_client {
-	struct mutex lock;
-	spinlock_t spinlock;
-	bool no_sleep;
+struct lru_entry {
+	struct list_head list;
+	atomic_t referenced;
+};
 
-	struct list_head lru[LIST_SIZE];
-	unsigned long n_buffers[LIST_SIZE];
+struct lru_iter {
+	struct lru *lru;
+	struct list_head list;
+	struct lru_entry *stop;
+	struct lru_entry *e;
+};
 
-	struct block_device *bdev;
-	unsigned int block_size;
-	s8 sectors_per_block_bits;
-	void (*alloc_callback)(struct dm_buffer *buf);
-	void (*write_callback)(struct dm_buffer *buf);
-	struct kmem_cache *slab_buffer;
-	struct kmem_cache *slab_cache;
-	struct dm_io_client *dm_io;
+struct lru {
+	struct list_head *cursor;
+	unsigned long count;
 
-	struct list_head reserved_buffers;
-	unsigned int need_reserved_buffers;
+	struct list_head iterators;
+};
 
-	unsigned int minimum_buffers;
+/*--------------*/
 
-	struct rb_root buffer_tree;
-	wait_queue_head_t free_buffer_wait;
+static void lru_init(struct lru *lru)
+{
+	lru->cursor = NULL;
+	lru->count = 0;
+	INIT_LIST_HEAD(&lru->iterators);
+}
 
-	sector_t start;
+static void lru_destroy(struct lru *lru)
+{
+	WARN_ON_ONCE(lru->cursor);
+	WARN_ON_ONCE(!list_empty(&lru->iterators));
+}
 
-	int async_write_error;
+/*
+ * Insert a new entry into the lru.
+ */
+static void lru_insert(struct lru *lru, struct lru_entry *le)
+{
+	/*
+	 * Don't be tempted to set to 1, makes the lru aspect
+	 * perform poorly.
+	 */
+	atomic_set(&le->referenced, 0);
 
-	struct list_head client_list;
+	if (lru->cursor) {
+		list_add_tail(&le->list, lru->cursor);
+	} else {
+		INIT_LIST_HEAD(&le->list);
+		lru->cursor = &le->list;
+	}
+	lru->count++;
+}
 
-	struct shrinker shrinker;
-	struct work_struct shrink_work;
-	atomic_long_t need_shrink;
+/*--------------*/
+
+/*
+ * Convert a list_head pointer to an lru_entry pointer.
+ */
+static inline struct lru_entry *to_le(struct list_head *l)
+{
+	return container_of(l, struct lru_entry, list);
+}
+
+/*
+ * Initialize an lru_iter and add it to the list of cursors in the lru.
+ */
+static void lru_iter_begin(struct lru *lru, struct lru_iter *it)
+{
+	it->lru = lru;
+	it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL;
+	it->e = lru->cursor ? to_le(lru->cursor) : NULL;
+	list_add(&it->list, &lru->iterators);
+}
+
+/*
+ * Remove an lru_iter from the list of cursors in the lru.
+ */
+static inline void lru_iter_end(struct lru_iter *it)
+{
+	list_del(&it->list);
+}
+
+/* Predicate function type to be used with lru_iter_next */
+typedef bool (*iter_predicate)(struct lru_entry *le, void *context);
+
+/*
+ * Advance the cursor to the next entry that passes the
+ * predicate, and return that entry. Returns NULL if the
+ * iteration is complete.
+ */
+static struct lru_entry *lru_iter_next(struct lru_iter *it,
+				       iter_predicate pred, void *context)
+{
+	struct lru_entry *e;
+
+	while (it->e) {
+		e = it->e;
+
+		/* advance the cursor */
+		if (it->e == it->stop)
+			it->e = NULL;
+		else
+			it->e = to_le(it->e->list.next);
+
+		if (pred(e, context))
+			return e;
+	}
+
+	return NULL;
+}
+
+/*
+ * Invalidate a specific lru_entry and update all cursors in
+ * the lru accordingly.
+ */
+static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e)
+{
+	struct lru_iter *it;
+
+	list_for_each_entry(it, &lru->iterators, list) {
+		/* Move c->e forwards if necc. */
+		if (it->e == e) {
+			it->e = to_le(it->e->list.next);
+			if (it->e == e)
+				it->e = NULL;
+		}
+
+		/* Move it->stop backwards if necc. */
+		if (it->stop == e) {
+			it->stop = to_le(it->stop->list.prev);
+			if (it->stop == e)
+				it->stop = NULL;
+		}
+	}
+}
+
+/*--------------*/
+
+/*
+ * Remove a specific entry from the lru.
+ */
+static void lru_remove(struct lru *lru, struct lru_entry *le)
+{
+	lru_iter_invalidate(lru, le);
+	if (lru->count == 1) {
+		lru->cursor = NULL;
+	} else {
+		if (lru->cursor == &le->list)
+			lru->cursor = lru->cursor->next;
+		list_del(&le->list);
+	}
+	lru->count--;
+}
+
+/*
+ * Mark as referenced.
+ */
+static inline void lru_reference(struct lru_entry *le)
+{
+	atomic_set(&le->referenced, 1);
+}
+
+/*--------------*/
+
+/*
+ * Remove the least recently used entry (approx), that passes the predicate.
+ * Returns NULL on failure.
+ */
+enum evict_result {
+	ER_EVICT,
+	ER_DONT_EVICT,
+	ER_STOP, /* stop looking for something to evict */
+};
+
+typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
+
+static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context)
+{
+	unsigned long tested = 0;
+	struct list_head *h = lru->cursor;
+	struct lru_entry *le;
+
+	if (!h)
+		return NULL;
+	/*
+	 * In the worst case we have to loop around twice. Once to clear
+	 * the reference flags, and then again to discover the predicate
+	 * fails for all entries.
+	 */
+	while (tested < lru->count) {
+		le = container_of(h, struct lru_entry, list);
+
+		if (atomic_read(&le->referenced)) {
+			atomic_set(&le->referenced, 0);
+		} else {
+			tested++;
+			switch (pred(le, context)) {
+			case ER_EVICT:
+				/*
+				 * Adjust the cursor, so we start the next
+				 * search from here.
+				 */
+				lru->cursor = le->list.next;
+				lru_remove(lru, le);
+				return le;
+
+			case ER_DONT_EVICT:
+				break;
+
+			case ER_STOP:
+				lru->cursor = le->list.next;
+				return NULL;
+			}
+		}
+
+		h = h->next;
+
+		cond_resched();
+	}
+
+	return NULL;
+}
+
+/*--------------------------------------------------------------*/
+
 /*
 * Buffer state bits.
 */
@@ -137,26 +323,37 @@ enum data_mode {
 };
 
 struct dm_buffer {
+	/* protected by the locks in dm_buffer_cache */
	struct rb_node node;
-	struct list_head lru_list;
-	struct list_head global_list;
+
+	/* immutable, so don't need protecting */
	sector_t block;
	void *data;
	unsigned char data_mode;		/* DATA_MODE_* */
+
+	/*
+	 * These two fields are used in isolation, so do not need
+	 * a surrounding lock.
+	 */
+	atomic_t hold_count;
+	unsigned long last_accessed;
+
+	/*
+	 * Everything else is protected by the mutex in
+	 * dm_bufio_client
+	 */
+	unsigned long state;
+	struct lru_entry lru;
	unsigned char list_mode;		/* LIST_* */
	blk_status_t read_error;
	blk_status_t write_error;
-	unsigned int accessed;
-	unsigned int hold_count;
-	unsigned long state;
-	unsigned long last_accessed;
	unsigned int dirty_start;
	unsigned int dirty_end;
	unsigned int write_start;
	unsigned int write_end;
-	struct dm_bufio_client *c;
	struct list_head write_list;
-	void (*end_io)(struct dm_buffer *buf, blk_status_t stat);
+	struct dm_bufio_client *c;
+	void (*end_io)(struct dm_buffer *b, blk_status_t bs);
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 #define MAX_STACK 10
	unsigned int stack_len;
@@ -164,126 +361,507 @@ struct dm_buffer {
 #endif
 };
 
-static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
+/*--------------------------------------------------------------*/
 
-/*----------------------------------------------------------------*/
+/*
+ * The buffer cache manages buffers, particularly:
+ *  - inc/dec of holder count
+ *  - setting the last_accessed field
+ *  - maintains clean/dirty state along with lru
+ *  - selecting buffers that match predicates
+ *
+ * It does *not* handle:
+ *  - allocation/freeing of buffers.
+ *  - IO
+ *  - Eviction or cache sizing.
+ *
+ * cache_get() and cache_put() are threadsafe, you do not need to
+ * protect these calls with a surrounding mutex. All the other
+ * methods are not threadsafe; they do use locking primitives, but
+ * only enough to ensure get/put are threadsafe.
+ */
 
-#define dm_bufio_in_request()	(!!current->bio_list)
+struct buffer_tree {
+	struct rw_semaphore lock;
+	struct rb_root root;
+} ____cacheline_aligned_in_smp;
 
-static void dm_bufio_lock(struct dm_bufio_client *c)
+struct dm_buffer_cache {
+	struct lru lru[LIST_SIZE];
+	/*
+	 * We spread entries across multiple trees to reduce contention
+	 * on the locks.
+	 */
+	unsigned int num_locks;
+	struct buffer_tree trees[];
+};
+
+static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
 {
-	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
-		spin_lock_bh(&c->spinlock);
-	else
-		mutex_lock_nested(&c->lock, dm_bufio_in_request());
+	return dm_hash_locks_index(block, num_locks);
 }
 
-static int dm_bufio_trylock(struct dm_bufio_client *c)
+static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
 {
-	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
-		return spin_trylock_bh(&c->spinlock);
+	down_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
+}
+
+static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
+{
+	up_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
+}
+
+static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
+{
+	down_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
+}
+
+static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
+{
+	up_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
+}
+
+/*
+ * Sometimes we want to repeatedly get and drop locks as part of an iteration.
+ * This struct helps avoid redundant drop and gets of the same lock.
+ */
+struct lock_history {
+	struct dm_buffer_cache *cache;
+	bool write;
+	unsigned int previous;
+	unsigned int no_previous;
+};
+
+static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write)
+{
+	lh->cache = cache;
+	lh->write = write;
+	lh->no_previous = cache->num_locks;
+	lh->previous = lh->no_previous;
+}
+
+static void __lh_lock(struct lock_history *lh, unsigned int index)
+{
+	if (lh->write)
+		down_write(&lh->cache->trees[index].lock);
	else
-		return mutex_trylock(&c->lock);
+		down_read(&lh->cache->trees[index].lock);
 }
 
-static void dm_bufio_unlock(struct dm_bufio_client *c)
+static void __lh_unlock(struct lock_history *lh, unsigned int index)
 {
-	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
-		spin_unlock_bh(&c->spinlock);
+	if (lh->write)
+		up_write(&lh->cache->trees[index].lock);
	else
-		mutex_unlock(&c->lock);
+		up_read(&lh->cache->trees[index].lock);
 }
 
-/*----------------------------------------------------------------*/
+/*
+ * Make sure you call this since it will unlock the final lock.
+ */
+static void lh_exit(struct lock_history *lh)
+{
+	if (lh->previous != lh->no_previous) {
+		__lh_unlock(lh, lh->previous);
+		lh->previous = lh->no_previous;
+	}
+}
 
 /*
- * Default cache size: available memory divided by the ratio.
+ * Named 'next' because there is no corresponding
+ * 'up/unlock' call since it's done automatically.
 */
-static unsigned long dm_bufio_default_cache_size;
+static void lh_next(struct lock_history *lh, sector_t b)
+{
+	unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */
+
+	if (lh->previous != lh->no_previous) {
+		if (lh->previous != index) {
+			__lh_unlock(lh, lh->previous);
+			__lh_lock(lh, index);
+			lh->previous = index;
+		}
+	} else {
+		__lh_lock(lh, index);
+		lh->previous = index;
+	}
+}
+
+static inline struct dm_buffer *le_to_buffer(struct lru_entry *le)
+{
+	return container_of(le, struct dm_buffer, lru);
+}
+
+static struct dm_buffer *list_to_buffer(struct list_head *l)
+{
+	struct lru_entry *le = list_entry(l, struct lru_entry, list);
+
+	if (!le)
+		return NULL;
+
+	return le_to_buffer(le);
+}
+
+static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks)
+{
+	unsigned int i;
+
+	bc->num_locks = num_locks;
+
+	for (i = 0; i < bc->num_locks; i++) {
+		init_rwsem(&bc->trees[i].lock);
+		bc->trees[i].root = RB_ROOT;
+	}
+
+	lru_init(&bc->lru[LIST_CLEAN]);
+	lru_init(&bc->lru[LIST_DIRTY]);
+}
+
+static void cache_destroy(struct dm_buffer_cache *bc)
+{
+	unsigned int i;
+
+	for (i = 0; i < bc->num_locks; i++)
+		WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));
+
+	lru_destroy(&bc->lru[LIST_CLEAN]);
+	lru_destroy(&bc->lru[LIST_DIRTY]);
+}
+
+/*--------------*/
 
 /*
- * Total cache size set by the user.
+ * not threadsafe, or racey depending how you look at it
 */
-static unsigned long dm_bufio_cache_size;
+static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
+{
+	return bc->lru[list_mode].count;
+}
+
+static inline unsigned long cache_total(struct dm_buffer_cache *bc)
+{
+	return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
+}
+
+/*--------------*/
 
 /*
- * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
- * at any time. If it disagrees, the user has changed cache size.
+ * Gets a specific buffer, indexed by block.
+ * If the buffer is found then its holder count will be incremented and
+ * lru_reference will be called.
+ *
+ * threadsafe
 */
-static unsigned long dm_bufio_cache_size_latch;
+static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
+{
+	struct rb_node *n = root->rb_node;
+	struct dm_buffer *b;
 
-static DEFINE_SPINLOCK(global_spinlock);
+	while (n) {
+		b = container_of(n, struct dm_buffer, node);
+
+		if (b->block == block)
+			return b;
+
+		n = block < b->block ? n->rb_left : n->rb_right;
+	}
+
+	return NULL;
+}
+
+static void __cache_inc_buffer(struct dm_buffer *b)
+{
+	atomic_inc(&b->hold_count);
+	WRITE_ONCE(b->last_accessed, jiffies);
+}
+
+static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
+{
+	struct dm_buffer *b;
+
+	cache_read_lock(bc, block);
+	b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
+	if (b) {
+		lru_reference(&b->lru);
+		__cache_inc_buffer(b);
+	}
+	cache_read_unlock(bc, block);
 
-static LIST_HEAD(global_queue);
+	return b;
+}
 
-static unsigned long global_num;
+/*--------------*/
 
 /*
- * Buffers are freed after this timeout
+ * Returns true if the hold count hits zero.
+ * threadsafe
+ */
-static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
-static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
+static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
+{
+	bool r;
 
-static unsigned long dm_bufio_peak_allocated;
-static unsigned long dm_bufio_allocated_kmem_cache;
-static unsigned long dm_bufio_allocated_get_free_pages;
-static unsigned long dm_bufio_allocated_vmalloc;
-static unsigned long dm_bufio_current_allocated;
+	cache_read_lock(bc, b->block);
+	BUG_ON(!atomic_read(&b->hold_count));
+	r = atomic_dec_and_test(&b->hold_count);
+	cache_read_unlock(bc, b->block);
 
-/*----------------------------------------------------------------*/
+	return r;
+}
+
+/*--------------*/
+
+typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *);
 
 /*
- * The current number of clients.
+ * Evicts a buffer based on a predicate. The oldest buffer that
+ * matches the predicate will be selected. In addition to the
+ * predicate the hold_count of the selected buffer will be zero.
 */
-static int dm_bufio_client_count;
+struct evict_wrapper {
+	struct lock_history *lh;
+	b_predicate pred;
+	void *context;
+};
 
 /*
- * The list of all clients.
+ * Wraps the buffer predicate turning it into an lru predicate. Adds
+ * extra test for hold_count.
 */
-static LIST_HEAD(dm_bufio_all_clients);
+static enum evict_result __evict_pred(struct lru_entry *le, void *context)
+{
+	struct evict_wrapper *w = context;
+	struct dm_buffer *b = le_to_buffer(le);
+
+	lh_next(w->lh, b->block);
+
+	if (atomic_read(&b->hold_count))
+		return ER_DONT_EVICT;
+
+	return w->pred(b, w->context);
+}
+
+static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
+				       b_predicate pred, void *context,
+				       struct lock_history *lh)
+{
+	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
+	struct lru_entry *le;
+	struct dm_buffer *b;
+
+	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w);
+	if (!le)
+		return NULL;
+
+	b = le_to_buffer(le);
+	/* __evict_pred will have locked the appropriate tree. */
+	rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
+
+	return b;
+}
+
+static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
+				     b_predicate pred, void *context)
+{
+	struct dm_buffer *b;
+	struct lock_history lh;
+
+	lh_init(&lh, bc, true);
+	b = __cache_evict(bc, list_mode, pred, context, &lh);
+	lh_exit(&lh);
+
+	return b;
+}
+
+/*--------------*/
 
 /*
- * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
+ * Mark a buffer as clean or dirty. Not threadsafe.
 */
-static DEFINE_MUTEX(dm_bufio_clients_lock);
+static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
+{
+	cache_write_lock(bc, b->block);
+	if (list_mode != b->list_mode) {
+		lru_remove(&bc->lru[b->list_mode], &b->lru);
+		b->list_mode = list_mode;
+		lru_insert(&bc->lru[b->list_mode], &b->lru);
+	}
+	cache_write_unlock(bc, b->block);
+}
 
-static struct workqueue_struct *dm_bufio_wq;
-static struct delayed_work dm_bufio_cleanup_old_work;
-static struct work_struct dm_bufio_replacement_work;
+/*--------------*/
 
+/*
+ * Runs through the lru associated with 'old_mode', if the predicate matches then
+ * it moves them to 'new_mode'. Not threadsafe.
+ */
+static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
+			      b_predicate pred, void *context, struct lock_history *lh)
+{
+	struct lru_entry *le;
+	struct dm_buffer *b;
+	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
 
-#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
-static void buffer_record_stack(struct dm_buffer *b)
+	while (true) {
+		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w);
+		if (!le)
+			break;
+
+		b = le_to_buffer(le);
+		b->list_mode = new_mode;
+		lru_insert(&bc->lru[b->list_mode], &b->lru);
+	}
+}
+
+static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
+			    b_predicate pred, void *context)
 {
-	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
+	struct lock_history lh;
+
+	lh_init(&lh, bc, true);
+	__cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
+	lh_exit(&lh);
 }
-#endif
+
+/*--------------*/
+
+/*
+ * Iterates through all clean or dirty entries calling a function for each
+ * entry. The callback may terminate the iteration early. Not threadsafe.
+ */
 
 /*
- *----------------------------------------------------------------
- * A red/black tree acts as an index for all the buffers.
- *----------------------------------------------------------------
+ * Iterator functions should return one of these actions to indicate
+ * how the iteration should proceed.
 */
-static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
+enum it_action {
+	IT_NEXT,
+	IT_COMPLETE,
+};
+
+typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);
+
+static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
+			    iter_fn fn, void *context, struct lock_history *lh)
 {
-	struct rb_node *n = c->buffer_tree.rb_node;
-	struct dm_buffer *b;
+	struct lru *lru = &bc->lru[list_mode];
+	struct lru_entry *le, *first;
 
-	while (n) {
-		b = container_of(n, struct dm_buffer, node);
+	if (!lru->cursor)
+		return;
 
-		if (b->block == block)
-			return b;
+	first = le = to_le(lru->cursor);
+	do {
+		struct dm_buffer *b = le_to_buffer(le);
 
-		n = block < b->block ? n->rb_left : n->rb_right;
+		lh_next(lh, b->block);
+
+		switch (fn(b, context)) {
+		case IT_NEXT:
+			break;
+
+		case IT_COMPLETE:
+			return;
+		}
+		cond_resched();
+
+		le = to_le(le->list.next);
+	} while (le != first);
+}
+
+static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
+			  iter_fn fn, void *context)
+{
+	struct lock_history lh;
+
+	lh_init(&lh, bc, false);
+	__cache_iterate(bc, list_mode, fn, context, &lh);
+	lh_exit(&lh);
+}
+
+/*--------------*/
+
+/*
+ * Passes ownership of the buffer to the cache. Returns false if the
+ * buffer was already present (in which case ownership does not pass).
+ * eg, a race with another thread.
+ *
+ * Holder count should be 1 on insertion.
+ *
+ * Not threadsafe.
+ */
+static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
+{
+	struct rb_node **new = &root->rb_node, *parent = NULL;
+	struct dm_buffer *found;
+
+	while (*new) {
+		found = container_of(*new, struct dm_buffer, node);
+
+		if (found->block == b->block)
+			return false;
+
+		parent = *new;
+		new = b->block < found->block ?
+			&found->node.rb_left : &found->node.rb_right;
 	}
 
-	return NULL;
+	rb_link_node(&b->node, parent, new);
+	rb_insert_color(&b->node, root);
+
+	return true;
+}
+
+static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
+{
+	bool r;
+
+	if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
+		return false;
+
+	cache_write_lock(bc, b->block);
+	BUG_ON(atomic_read(&b->hold_count) != 1);
+	r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
+	if (r)
+		lru_insert(&bc->lru[b->list_mode], &b->lru);
+	cache_write_unlock(bc, b->block);
+
+	return r;
 }
 
-static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
+/*--------------*/
+
+/*
+ * Removes buffer from cache, ownership of the buffer passes back to the caller.
+ * Fails if the hold_count is not one (ie. the caller holds the only reference).
+ *
+ * Not threadsafe.
+ */
+static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
 {
-	struct rb_node *n = c->buffer_tree.rb_node;
+	bool r;
+
+	cache_write_lock(bc, b->block);
+
+	if (atomic_read(&b->hold_count) != 1) {
+		r = false;
+	} else {
+		r = true;
+		rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
+		lru_remove(&bc->lru[b->list_mode], &b->lru);
+	}
+
+	cache_write_unlock(bc, b->block);
+
+	return r;
+}
+
+/*--------------*/
+
+typedef void (*b_release)(struct dm_buffer *);
+
+static struct dm_buffer *__find_next(struct rb_root *root, sector_t block)
+{
+	struct rb_node *n = root->rb_node;
	struct dm_buffer *b;
	struct dm_buffer *best = NULL;
 
@@ -304,35 +882,188 @@ static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
	return best;
 }
 
-static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
+static void __remove_range(struct dm_buffer_cache *bc,
+			   struct rb_root *root,
+			   sector_t begin, sector_t end,
+			   b_predicate pred, b_release release)
 {
-	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
-	struct dm_buffer *found;
+	struct dm_buffer *b;
 
-	while (*new) {
-		found = container_of(*new, struct dm_buffer, node);
+	while (true) {
+		cond_resched();
 
-		if (found->block == b->block) {
-			BUG_ON(found != b);
-			return;
+		b = __find_next(root, begin);
+		if (!b || (b->block >= end))
+			break;
+
+		begin = b->block + 1;
+
+		if (atomic_read(&b->hold_count))
+			continue;
+
+		if (pred(b, NULL) == ER_EVICT) {
+			rb_erase(&b->node, root);
+			lru_remove(&bc->lru[b->list_mode], &b->lru);
+			release(b);
		}
+	}
+}
 
-		parent = *new;
-		new = b->block < found->block ?
-			&found->node.rb_left : &found->node.rb_right;
+static void cache_remove_range(struct dm_buffer_cache *bc,
+			       sector_t begin, sector_t end,
+			       b_predicate pred, b_release release)
+{
+	unsigned int i;
+
+	for (i = 0; i < bc->num_locks; i++) {
+		down_write(&bc->trees[i].lock);
+		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
+		up_write(&bc->trees[i].lock);
	}
+}
 
-	rb_link_node(&b->node, parent, new);
-	rb_insert_color(&b->node, &c->buffer_tree);
+/*----------------------------------------------------------------*/
+
+/*
+ * Linking of buffers:
+ *	All buffers are linked to buffer_cache with their node field.
+ *
+ *	Clean buffers that are not being written (B_WRITING not set)
+ *	are linked to lru[LIST_CLEAN] with their lru_list field.
+ *
+ *	Dirty and clean buffers that are being written are linked to
+ *	lru[LIST_DIRTY] with their lru_list field. When the write
+ *	finishes, the buffer cannot be relinked immediately (because we
+ *	are in an interrupt context and relinking requires process
+ *	context), so some clean-not-writing buffers can be held on
+ *	dirty_lru too. They are later added to lru in the process
+ *	context.
+ */
+struct dm_bufio_client {
+	struct block_device *bdev;
+	unsigned int block_size;
+	s8 sectors_per_block_bits;
+
+	bool no_sleep;
+	struct mutex lock;
+	spinlock_t spinlock;
+
+	int async_write_error;
+
+	void (*alloc_callback)(struct dm_buffer *buf);
+	void (*write_callback)(struct dm_buffer *buf);
+	struct kmem_cache *slab_buffer;
+	struct kmem_cache *slab_cache;
+	struct dm_io_client *dm_io;
+
+	struct list_head reserved_buffers;
+	unsigned int need_reserved_buffers;
+
+	unsigned int minimum_buffers;
+
+	sector_t start;
+
+	struct shrinker shrinker;
+	struct work_struct shrink_work;
+	atomic_long_t need_shrink;
+
+	wait_queue_head_t free_buffer_wait;
+
+	struct list_head client_list;
+
+	/*
+	 * Used by global_cleanup to sort the clients list.
+	 */
+	unsigned long oldest_buffer;
+
+	struct dm_buffer_cache cache; /* must be last member */
+};
+
+static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
+
+/*----------------------------------------------------------------*/
+
+#define dm_bufio_in_request()	(!!current->bio_list)
+
+static void dm_bufio_lock(struct dm_bufio_client *c)
+{
+	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+		spin_lock_bh(&c->spinlock);
+	else
+		mutex_lock_nested(&c->lock, dm_bufio_in_request());
 }
 
-static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
+static void dm_bufio_unlock(struct dm_bufio_client *c)
 {
-	rb_erase(&b->node, &c->buffer_tree);
+	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+		spin_unlock_bh(&c->spinlock);
+	else
+		mutex_unlock(&c->lock);
 }
 
 /*----------------------------------------------------------------*/
 
+/*
+ * Default cache size: available memory divided by the ratio.
+ */
+static unsigned long dm_bufio_default_cache_size;
+
+/*
+ * Total cache size set by the user.
+ */
+static unsigned long dm_bufio_cache_size;
+
+/*
+ * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
+ * at any time. If it disagrees, the user has changed cache size.
+ */
+static unsigned long dm_bufio_cache_size_latch;
+
+static DEFINE_SPINLOCK(global_spinlock);
+
+/*
+ * Buffers are freed after this timeout
+ */
+static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
+static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
+
+static unsigned long dm_bufio_peak_allocated;
+static unsigned long dm_bufio_allocated_kmem_cache;
+static unsigned long dm_bufio_allocated_get_free_pages;
+static unsigned long dm_bufio_allocated_vmalloc;
+static unsigned long dm_bufio_current_allocated;
+
+/*----------------------------------------------------------------*/
+
+/*
+ * The current number of clients.
+ */
+static int dm_bufio_client_count;
+
+/*
+ * The list of all clients.
+ */
+static LIST_HEAD(dm_bufio_all_clients);
+
+/*
+ * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
+ */
+static DEFINE_MUTEX(dm_bufio_clients_lock);
+
+static struct workqueue_struct *dm_bufio_wq;
+static struct delayed_work dm_bufio_cleanup_old_work;
+static struct work_struct dm_bufio_replacement_work;
+
+
+#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
+static void buffer_record_stack(struct dm_buffer *b)
+{
+	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
+}
+#endif
+
+/*----------------------------------------------------------------*/
+
 static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
 {
	unsigned char data_mode;
@@ -358,16 +1089,9 @@ static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;
 
-	b->accessed = 1;
-
	if (!unlink) {
-		list_add(&b->global_list, &global_queue);
-		global_num++;
		if (dm_bufio_current_allocated > dm_bufio_cache_size)
			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
-	} else {
-		list_del(&b->global_list);
-		global_num--;
	}
 
	spin_unlock(&global_spinlock);
@@ -378,8 +1102,10 @@ static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
 */
 static void __cache_size_refresh(void)
 {
-	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
-	BUG_ON(dm_bufio_client_count < 0);
+	if (WARN_ON(!mutex_is_locked(&dm_bufio_clients_lock)))
+		return;
+	if (WARN_ON(dm_bufio_client_count < 0))
+		return;
 
	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
 
@@ -495,6 +1221,7 @@ static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
		kmem_cache_free(c->slab_buffer, b);
		return NULL;
	}
+	adjust_total_allocated(b, false);
 
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	b->stack_len = 0;
@@ -509,62 +1236,12 @@ static void free_buffer(struct dm_buffer *b)
 {
	struct dm_bufio_client *c = b->c;
 
+	adjust_total_allocated(b, true);
	free_buffer_data(c, b->data, b->data_mode);
	kmem_cache_free(c->slab_buffer, b);
 }
 
 /*
- * Link buffer to the buffer tree and clean or dirty queue.
- */
-static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
-{
-	struct dm_bufio_client *c = b->c;
-
-	c->n_buffers[dirty]++;
-	b->block = block;
-	b->list_mode = dirty;
-	list_add(&b->lru_list, &c->lru[dirty]);
-	__insert(b->c, b);
-	b->last_accessed = jiffies;
-
-	adjust_total_allocated(b, false);
-}
-
-/*
- * Unlink buffer from the buffer tree and dirty or clean queue.
- */
-static void __unlink_buffer(struct dm_buffer *b)
-{
-	struct dm_bufio_client *c = b->c;
-
-	BUG_ON(!c->n_buffers[b->list_mode]);
-
-	c->n_buffers[b->list_mode]--;
-	__remove(b->c, b);
-	list_del(&b->lru_list);
-
-	adjust_total_allocated(b, true);
-}
-
-/*
- * Place the buffer to the head of dirty or clean LRU queue.
- */
-static void __relink_lru(struct dm_buffer *b, int dirty)
-{
-	struct dm_bufio_client *c = b->c;
-
-	b->accessed = 1;
-
-	BUG_ON(!c->n_buffers[b->list_mode]);
-
-	c->n_buffers[b->list_mode]--;
-	c->n_buffers[dirty]++;
-	b->list_mode = dirty;
-	list_move(&b->lru_list, &c->lru[dirty]);
-	b->last_accessed = jiffies;
-}
-
-/*
 *--------------------------------------------------------------------------
 * Submit I/O on the buffer.
 *
@@ -639,19 +1316,14 @@ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
 {
	struct bio *bio;
	char *ptr;
-	unsigned int vec_size, len;
+	unsigned int len;
 
-	vec_size = b->c->block_size >> PAGE_SHIFT;
-	if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
-		vec_size += 2;
-
-	bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
+	bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
	if (!bio) {
-dmio:
		use_dmio(b, op, sector, n_sectors, offset);
		return;
	}
-	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op);
+	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = bio_complete;
	bio->bi_private = b;
@@ -659,18 +1331,7 @@ dmio:
	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;
 
-	do {
-		unsigned int this_step = min((unsigned int)(PAGE_SIZE - offset_in_page(ptr)), len);
-
-		if (!bio_add_page(bio, virt_to_page(ptr), this_step,
-				  offset_in_page(ptr))) {
-			bio_put(bio);
-			goto dmio;
-		}
-
-		len -= this_step;
-		ptr += this_step;
-	} while (len > 0);
+	__bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr));
 
	submit_bio(bio);
 }
@@ -803,7 +1464,7 @@ static void __flush_write_list(struct list_head *write_list)
 */
 static void __make_buffer_clean(struct dm_buffer *b)
 {
-	BUG_ON(b->hold_count);
+	BUG_ON(atomic_read(&b->hold_count));
 
	/* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
	if (!smp_load_acquire(&b->state))	/* fast case */
@@ -814,6 +1475,36 @@ static void __make_buffer_clean(struct dm_buffer *b)
		wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 }
 
+static enum evict_result is_clean(struct dm_buffer *b, void *context)
+{
+	struct dm_bufio_client *c = context;
+
+	/* These should never happen */
+	if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state)))
+		return ER_DONT_EVICT;
+	if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state)))
+		return ER_DONT_EVICT;
+	if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN))
+		return ER_DONT_EVICT;
+
+	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
+	    unlikely(test_bit(B_READING, &b->state)))
+		return ER_DONT_EVICT;
+
+	return ER_EVICT;
+}
+
+static enum evict_result is_dirty(struct dm_buffer *b, void *context)
+{
+	/* These should never happen */
+	if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
+		return ER_DONT_EVICT;
+	if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY))
+		return ER_DONT_EVICT;
+
+	return ER_EVICT;
+}
+
 /*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
@@ -822,34 +1513,20 @@
 {
	struct dm_buffer *b;
 
-	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
-		BUG_ON(test_bit(B_WRITING, &b->state));
-		BUG_ON(test_bit(B_DIRTY, &b->state));
-
-		if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
-		    unlikely(test_bit_acquire(B_READING, &b->state)))
-			continue;
-
-		if (!b->hold_count) {
-			__make_buffer_clean(b);
-			__unlink_buffer(b);
-			return b;
-		}
-		cond_resched();
+	b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c);
+	if (b) {
+		/* this also waits for pending reads */
+		__make_buffer_clean(b);
+		return b;
	}
 
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		return NULL;
 
-	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
-		BUG_ON(test_bit(B_READING, &b->state));
-
-		if (!b->hold_count) {
-			__make_buffer_clean(b);
-			__unlink_buffer(b);
-			return b;
-		}
-		cond_resched();
+	b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL);
+	if (b) {
+		__make_buffer_clean(b);
+		return b;
	}
 
	return NULL;
@@ -870,7 +1547,12 @@ static void __wait_for_free_buffer(struct dm_bufio_client *c)
	set_current_state(TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);
 
-	io_schedule();
+	/*
+	 * It's possible to miss a wake up event since we don't always
+	 * hold c->lock when wake_up is called. So we have a timeout here,
+	 * just in case.
+	 */
+	io_schedule_timeout(5 * HZ);
 
	remove_wait_queue(&c->free_buffer_wait, &wait);
 
@@ -928,9 +1610,8 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
		}
 
		if (!list_empty(&c->reserved_buffers)) {
-			b = list_entry(c->reserved_buffers.next,
-				       struct dm_buffer, lru_list);
-			list_del(&b->lru_list);
+			b = list_to_buffer(c->reserved_buffers.next);
+			list_del(&b->lru.list);
			c->need_reserved_buffers++;
 
			return b;
@@ -964,36 +1645,61 @@ static void __free_buffer_wake(struct dm_buffer *b)
 {
	struct dm_bufio_client *c = b->c;
 
+	b->block = -1;
	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
-		list_add(&b->lru_list, &c->reserved_buffers);
+		list_add(&b->lru.list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}
 
-	wake_up(&c->free_buffer_wait);
+	/*
+	 * We hold the bufio lock here, so no one can add entries to the
+	 * wait queue anyway.
+	 */
+	if (unlikely(waitqueue_active(&c->free_buffer_wait)))
+		wake_up(&c->free_buffer_wait);
 }
 
-static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
-					struct list_head *write_list)
+static enum evict_result cleaned(struct dm_buffer *b, void *context)
 {
-	struct dm_buffer *b, *tmp;
+	if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
+		return ER_DONT_EVICT; /* should never happen */
 
-	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
-		BUG_ON(test_bit(B_READING, &b->state));
+	if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state))
+		return ER_DONT_EVICT;
+	else
+		return ER_EVICT;
+}
 
-		if (!test_bit(B_DIRTY, &b->state) &&
-		    !test_bit(B_WRITING, &b->state)) {
-			__relink_lru(b, LIST_CLEAN);
-			continue;
-		}
+static void __move_clean_buffers(struct dm_bufio_client *c)
+{
+	cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL);
+}
 
-		if (no_wait && test_bit(B_WRITING, &b->state))
-			return;
+struct write_context {
+	int no_wait;
+	struct list_head *write_list;
+};
 
-		__write_dirty_buffer(b, write_list);
-		cond_resched();
-	}
+static enum it_action write_one(struct dm_buffer *b, void *context)
+{
+	struct write_context *wc = context;
+
+	if (wc->no_wait && test_bit(B_WRITING, &b->state))
+		return IT_COMPLETE;
+
+	__write_dirty_buffer(b, wc->write_list);
+	return IT_NEXT;
+}
+
+static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
+					struct list_head *write_list)
+{
+	struct write_context wc = {.no_wait = no_wait, .write_list = write_list};
+
+	__move_clean_buffers(c);
+	cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc);
 }
 
 /*
@@ -1004,7 +1710,8 @@ static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
 static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
 {
-	if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
+	if (cache_count(&c->cache, LIST_DIRTY) >
+	    cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO)
		__write_dirty_buffers_async(c, 1, write_list);
 }
 
@@ -1014,6 +1721,21 @@ static void __check_watermark(struct dm_bufio_client *c,
 *--------------------------------------------------------------
 */
 
+static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b)
+{
+	/*
+	 * Relying on waitqueue_active() is racey, but we sleep
+	 * with schedule_timeout anyway.
+	 */
+	if (cache_put(&c->cache, b) &&
+	    unlikely(waitqueue_active(&c->free_buffer_wait)))
+		wake_up(&c->free_buffer_wait);
+}
+
+/*
+ * This assumes you have already checked the cache to see if the buffer
+ * is already present (it will recheck after dropping the lock for allocation).
+ */
 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
@@ -1022,11 +1744,8 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
 
	*need_submit = 0;
 
-	b = __find(c, block);
-	if (b)
-		goto found_buffer;
-
-	if (nf == NF_GET)
+	/* This can't be called with NF_GET */
+	if (WARN_ON_ONCE(nf == NF_GET))
		return NULL;
 
	new_b = __alloc_buffer_wait(c, nf);
@@ -1037,7 +1756,7 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the buffer tree.
	 */
-	b = __find(c, block);
+	b = cache_get(&c->cache, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
@@ -1046,24 +1765,35 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
	__check_watermark(c, write_list);
 
	b = new_b;
-	b->hold_count = 1;
+	atomic_set(&b->hold_count, 1);
+	WRITE_ONCE(b->last_accessed, jiffies);
+	b->block = block;
	b->read_error = 0;
	b->write_error = 0;
-	__link_buffer(b, block, LIST_CLEAN);
+	b->list_mode = LIST_CLEAN;
 
-	if (nf == NF_FRESH) {
+	if (nf == NF_FRESH)
		b->state = 0;
-		return b;
+	else {
+		b->state = 1 << B_READING;
+		*need_submit = 1;
	}
 
-	b->state = 1 << B_READING;
-	*need_submit = 1;
+	/*
+	 * We mustn't insert into the cache until the B_READING state
+	 * is set. Otherwise another thread could get it and use
+	 * it before it had been read.
+	 */
+	cache_insert(&c->cache, b);
 
	return b;
 
 found_buffer:
-	if (nf == NF_PREFETCH)
+	if (nf == NF_PREFETCH) {
+		cache_put_and_wake(c, b);
		return NULL;
+	}
+
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if dm_bufio_get function is used. Both dm_bufio_get and
@@ -1071,12 +1801,11 @@ found_buffer:
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
-	if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state)))
+	if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
+		cache_put_and_wake(c, b);
		return NULL;
+	}
 
-	b->hold_count++;
-	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
-		     test_bit(B_WRITING, &b->state));
	return b;
 }
 
@@ -1106,18 +1835,50 @@ static void read_endio(struct dm_buffer *b, blk_status_t status)
 static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
 {
-	int need_submit;
+	int need_submit = 0;
	struct dm_buffer *b;
 
	LIST_HEAD(write_list);
 
-	dm_bufio_lock(c);
-	b = __bufio_new(c, block, nf, &need_submit, &write_list);
+	*bp = NULL;
+
+	/*
+	 * Fast path, hopefully the block is already in the cache. No need
+	 * to get the client lock for this.
+	 */
+	b = cache_get(&c->cache, block);
+	if (b) {
+		if (nf == NF_PREFETCH) {
+			cache_put_and_wake(c, b);
+			return NULL;
+		}
+
+		/*
+		 * Note: it is essential that we don't wait for the buffer to be
+		 * read if dm_bufio_get function is used. Both dm_bufio_get and
+		 * dm_bufio_prefetch can be used in the driver request routine.
+		 * If the user called both dm_bufio_prefetch and dm_bufio_get on
+		 * the same buffer, it would deadlock if we waited.
+		 */
+		if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
+			cache_put_and_wake(c, b);
+			return NULL;
+		}
+	}
+
+	if (!b) {
+		if (nf == NF_GET)
+			return NULL;
+
+		dm_bufio_lock(c);
+		b = __bufio_new(c, block, nf, &need_submit, &write_list);
+		dm_bufio_unlock(c);
+	}
+
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
-	if (b && b->hold_count == 1)
+	if (b && (atomic_read(&b->hold_count) == 1))
		buffer_record_stack(b);
 #endif
-	dm_bufio_unlock(c);
 
	__flush_write_list(&write_list);
 
@@ -1152,7 +1913,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_get);
 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
 {
-	BUG_ON(dm_bufio_in_request());
+	if (WARN_ON_ONCE(dm_bufio_in_request()))
+		return ERR_PTR(-EINVAL);
 
	return new_read(c, block, NF_READ, bp);
 }
@@ -1161,7 +1923,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_read);
 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
 {
-	BUG_ON(dm_bufio_in_request());
+	if (WARN_ON_ONCE(dm_bufio_in_request()))
+		return ERR_PTR(-EINVAL);
 
	return new_read(c, block, NF_FRESH, bp);
 }
@@ -1174,15 +1937,23 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
 
	LIST_HEAD(write_list);
 
-	BUG_ON(dm_bufio_in_request());
+	if (WARN_ON_ONCE(dm_bufio_in_request()))
+		return; /* should never happen */
 
	blk_start_plug(&plug);
-	dm_bufio_lock(c);
 
	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
 
+		b = cache_get(&c->cache, block);
+		if (b) {
+			/* already in cache */
+			cache_put_and_wake(c, b);
+			continue;
+		}
+
+		dm_bufio_lock(c);
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
@@ -1205,10 +1976,9 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
				goto flush_plug;
			dm_bufio_lock(c);
		}
+		dm_bufio_unlock(c);
	}
 
-	dm_bufio_unlock(c);
-
 flush_plug:
	blk_finish_plug(&plug);
 }
@@ -1218,29 +1988,28 @@ void dm_bufio_release(struct dm_buffer *b)
 {
	struct dm_bufio_client *c = b->c;
 
-	dm_bufio_lock(c);
-
-	BUG_ON(!b->hold_count);
-
-	b->hold_count--;
-	if (!b->hold_count) {
-		wake_up(&c->free_buffer_wait);
+	/*
+	 * If there were errors on the buffer, and the buffer is not
+	 * to be written, free the buffer. There is no point in caching
+	 * invalid buffer.
+	 */
+	if ((b->read_error || b->write_error) &&
+	    !test_bit_acquire(B_READING, &b->state) &&
+	    !test_bit(B_WRITING, &b->state) &&
+	    !test_bit(B_DIRTY, &b->state)) {
+		dm_bufio_lock(c);
 
-		/*
-		 * If there were errors on the buffer, and the buffer is not
-		 * to be written, free the buffer. There is no point in caching
-		 * invalid buffer.
-		 */
-		if ((b->read_error || b->write_error) &&
-		    !test_bit_acquire(B_READING, &b->state) &&
-		    !test_bit(B_WRITING, &b->state) &&
-		    !test_bit(B_DIRTY, &b->state)) {
-			__unlink_buffer(b);
+		/* cache remove can fail if there are other holders */
+		if (cache_remove(&c->cache, b)) {
			__free_buffer_wake(b);
+			dm_bufio_unlock(c);
+			return;
		}
+
+		dm_bufio_unlock(c);
	}
 
-	dm_bufio_unlock(c);
+	cache_put_and_wake(c, b);
 }
 EXPORT_SYMBOL_GPL(dm_bufio_release);
 
@@ -1259,7 +2028,7 @@ void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
	if (!test_and_set_bit(B_DIRTY, &b->state)) {
		b->dirty_start = start;
		b->dirty_end = end;
-		__relink_lru(b, LIST_DIRTY);
+		cache_mark(&c->cache, b, LIST_DIRTY);
	} else {
		if (start < b->dirty_start)
			b->dirty_start = start;
@@ -1281,7 +2050,8 @@ void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
 {
	LIST_HEAD(write_list);
 
-	BUG_ON(dm_bufio_in_request());
+	if (WARN_ON_ONCE(dm_bufio_in_request()))
+		return; /* should never happen */
 
	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
@@ -1297,11 +2067,19 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
 *
 * Finally, we flush hardware disk cache.
 */
+static bool is_writing(struct lru_entry *e, void *context)
+{
+	struct dm_buffer *b = le_to_buffer(e);
+
+	return test_bit(B_WRITING, &b->state);
+}
+
 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
 {
	int a, f;
-	unsigned long buffers_processed = 0;
-	struct dm_buffer *b, *tmp;
+	unsigned long nr_buffers;
+	struct lru_entry *e;
+	struct lru_iter it;
 
	LIST_HEAD(write_list);
 
@@ -1311,52 +2089,32 @@ int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
	__flush_write_list(&write_list);
	dm_bufio_lock(c);
 
-again:
-	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
-		int dropped_lock = 0;
-
-		if (buffers_processed < c->n_buffers[LIST_DIRTY])
-			buffers_processed++;
+	nr_buffers = cache_count(&c->cache, LIST_DIRTY);
+	lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it);
+	while ((e = lru_iter_next(&it, is_writing, c))) {
+		struct dm_buffer *b = le_to_buffer(e);
+		__cache_inc_buffer(b);
 
		BUG_ON(test_bit(B_READING, &b->state));
 
-		if (test_bit(B_WRITING, &b->state)) {
-			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
-				dropped_lock = 1;
-				b->hold_count++;
-				dm_bufio_unlock(c);
-				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
-				dm_bufio_lock(c);
-				b->hold_count--;
-			} else
-				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
+		if (nr_buffers) {
+			nr_buffers--;
+			dm_bufio_unlock(c);
+			wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
+			dm_bufio_lock(c);
+		} else {
+			wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
		}
 
-		if (!test_bit(B_DIRTY, &b->state) &&
-		    !test_bit(B_WRITING, &b->state))
-			__relink_lru(b, LIST_CLEAN);
+		if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state))
			cache_mark(&c->cache, b, LIST_CLEAN);
 
-		cond_resched();
+		cache_put_and_wake(c, b);
 
-		/*
-		 * If we dropped the lock, the list is no longer consistent,
-		 * so we must restart the search.
-		 *
-		 * In the most common case, the buffer just processed is
-		 * relinked to the clean list, so we won't loop scanning the
-		 * same buffer again and again.
-		 *
-		 * This may livelock if there is another thread simultaneously
-		 * dirtying buffers, so we count the number of buffers walked
-		 * and if it exceeds the total number of buffers, it means that
-		 * someone is doing some writes simultaneously with us. In
-		 * this case, stop, dropping the lock.
- */ - if (dropped_lock) - goto again; + cond_resched(); } + lru_iter_end(&it); + wake_up(&c->free_buffer_wait); dm_bufio_unlock(c); @@ -1386,7 +2144,8 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c) .count = 0, }; - BUG_ON(dm_bufio_in_request()); + if (WARN_ON_ONCE(dm_bufio_in_request())) + return -EINVAL; return dm_io(&io_req, 1, &io_reg, NULL); } @@ -1409,95 +2168,30 @@ int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t c .count = block_to_sector(c, count), }; - BUG_ON(dm_bufio_in_request()); + if (WARN_ON_ONCE(dm_bufio_in_request())) + return -EINVAL; /* discards are optional */ return dm_io(&io_req, 1, &io_reg, NULL); } EXPORT_SYMBOL_GPL(dm_bufio_issue_discard); -/* - * We first delete any other buffer that may be at that new location. - * - * Then, we write the buffer to the original location if it was dirty. - * - * Then, if we are the only one who is holding the buffer, relink the buffer - * in the buffer tree for the new location. - * - * If there was someone else holding the buffer, we write it to the new - * location but not relink it, because that other user needs to have the buffer - * at the same place. - */ -void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block) +static bool forget_buffer(struct dm_bufio_client *c, sector_t block) { - struct dm_bufio_client *c = b->c; - struct dm_buffer *new; - - BUG_ON(dm_bufio_in_request()); - - dm_bufio_lock(c); + struct dm_buffer *b; -retry: - new = __find(c, new_block); - if (new) { - if (new->hold_count) { - __wait_for_free_buffer(c); - goto retry; + b = cache_get(&c->cache, block); + if (b) { + if (likely(!smp_load_acquire(&b->state))) { + if (cache_remove(&c->cache, b)) + __free_buffer_wake(b); + else + cache_put_and_wake(c, b); + } else { + cache_put_and_wake(c, b); } - - /* - * FIXME: Is there any point waiting for a write that's going - * to be overwritten in a bit? - */ - __make_buffer_clean(new); - __unlink_buffer(new); - __free_buffer_wake(new); } - BUG_ON(!b->hold_count); - BUG_ON(test_bit(B_READING, &b->state)); - - __write_dirty_buffer(b, NULL); - if (b->hold_count == 1) { - wait_on_bit_io(&b->state, B_WRITING, - TASK_UNINTERRUPTIBLE); - set_bit(B_DIRTY, &b->state); - b->dirty_start = 0; - b->dirty_end = c->block_size; - __unlink_buffer(b); - __link_buffer(b, new_block, LIST_DIRTY); - } else { - sector_t old_block; - - wait_on_bit_lock_io(&b->state, B_WRITING, - TASK_UNINTERRUPTIBLE); - /* - * Relink buffer to "new_block" so that write_callback - * sees "new_block" as a block number. - * After the write, link the buffer back to old_block. - * All this must be done in bufio lock, so that block number - * change isn't visible to other threads. - */ - old_block = b->block; - __unlink_buffer(b); - __link_buffer(b, new_block, b->list_mode); - submit_io(b, REQ_OP_WRITE, write_endio); - wait_on_bit_io(&b->state, B_WRITING, - TASK_UNINTERRUPTIBLE); - __unlink_buffer(b); - __link_buffer(b, old_block, b->list_mode); - } - - dm_bufio_unlock(c); - dm_bufio_release(b); -} -EXPORT_SYMBOL_GPL(dm_bufio_release_move); - -static void forget_buffer_locked(struct dm_buffer *b) -{ - if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) { - __unlink_buffer(b); - __free_buffer_wake(b); - } + return b ? 
true : false; } /* @@ -1508,38 +2202,22 @@ static void forget_buffer_locked(struct dm_buffer *b) */ void dm_bufio_forget(struct dm_bufio_client *c, sector_t block) { - struct dm_buffer *b; - dm_bufio_lock(c); - - b = __find(c, block); - if (b) - forget_buffer_locked(b); - + forget_buffer(c, block); dm_bufio_unlock(c); } EXPORT_SYMBOL_GPL(dm_bufio_forget); -void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks) +static enum evict_result idle(struct dm_buffer *b, void *context) { - struct dm_buffer *b; - sector_t end_block = block + n_blocks; - - while (block < end_block) { - dm_bufio_lock(c); - - b = __find_next(c, block); - if (b) { - block = b->block + 1; - forget_buffer_locked(b); - } - - dm_bufio_unlock(c); - - if (!b) - break; - } + return b->state ? ER_DONT_EVICT : ER_EVICT; +} +void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks) +{ + dm_bufio_lock(c); + cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake); + dm_bufio_unlock(c); } EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers); @@ -1601,13 +2279,29 @@ struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b) } EXPORT_SYMBOL_GPL(dm_bufio_get_client); +static enum it_action warn_leak(struct dm_buffer *b, void *context) +{ + bool *warned = context; + + WARN_ON(!(*warned)); + *warned = true; + DMERR("leaked buffer %llx, hold count %u, list %d", + (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode); +#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING + stack_trace_print(b->stack_entries, b->stack_len, 1); + /* mark unclaimed to avoid WARN_ON at end of drop_buffers() */ + atomic_set(&b->hold_count, 0); +#endif + return IT_NEXT; +} + static void drop_buffers(struct dm_bufio_client *c) { - struct dm_buffer *b; int i; - bool warned = false; + struct dm_buffer *b; - BUG_ON(dm_bufio_in_request()); + if (WARN_ON(dm_bufio_in_request())) + return; /* should never happen */ /* * An optimization so that the buffers are not written one-by-one. @@ -1619,18 +2313,11 @@ static void drop_buffers(struct dm_bufio_client *c) while ((b = __get_unclaimed_buffer(c))) __free_buffer_wake(b); - for (i = 0; i < LIST_SIZE; i++) - list_for_each_entry(b, &c->lru[i], lru_list) { - WARN_ON(!warned); - warned = true; - DMERR("leaked buffer %llx, hold count %u, list %d", - (unsigned long long)b->block, b->hold_count, i); -#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING - stack_trace_print(b->stack_entries, b->stack_len, 1); - /* mark unclaimed to avoid BUG_ON below */ - b->hold_count = 0; -#endif - } + for (i = 0; i < LIST_SIZE; i++) { + bool warned = false; + + cache_iterate(&c->cache, i, warn_leak, &warned); + } #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING while ((b = __get_unclaimed_buffer(c))) @@ -1638,39 +2325,11 @@ static void drop_buffers(struct dm_bufio_client *c) #endif for (i = 0; i < LIST_SIZE; i++) - BUG_ON(!list_empty(&c->lru[i])); + WARN_ON(cache_count(&c->cache, i)); dm_bufio_unlock(c); } -/* - * We may not be able to evict this buffer if IO pending or the client - * is still using it. Caller is expected to know buffer is too old. - * - * And if GFP_NOFS is used, we must not do any I/O because we hold - * dm_bufio_clients_lock and we would risk deadlock if the I/O gets - * rerouted to different bufio client. 
- */ -static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) -{ - if (!(gfp & __GFP_FS) || - (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) { - if (test_bit_acquire(B_READING, &b->state) || - test_bit(B_WRITING, &b->state) || - test_bit(B_DIRTY, &b->state)) - return false; - } - - if (b->hold_count) - return false; - - __make_buffer_clean(b); - __unlink_buffer(b); - __free_buffer_wake(b); - - return true; -} - static unsigned long get_retain_buffers(struct dm_bufio_client *c) { unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes); @@ -1686,22 +2345,28 @@ static unsigned long get_retain_buffers(struct dm_bufio_client *c) static void __scan(struct dm_bufio_client *c) { int l; - struct dm_buffer *b, *tmp; + struct dm_buffer *b; unsigned long freed = 0; - unsigned long count = c->n_buffers[LIST_CLEAN] + - c->n_buffers[LIST_DIRTY]; unsigned long retain_target = get_retain_buffers(c); + unsigned long count = cache_total(&c->cache); for (l = 0; l < LIST_SIZE; l++) { - list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { + while (true) { if (count - freed <= retain_target) atomic_long_set(&c->need_shrink, 0); if (!atomic_long_read(&c->need_shrink)) - return; - if (__try_evict_buffer(b, GFP_KERNEL)) { - atomic_long_dec(&c->need_shrink); - freed++; - } + break; + + b = cache_evict(&c->cache, l, + l == LIST_CLEAN ? is_clean : is_dirty, c); + if (!b) + break; + + __make_buffer_clean(b); + __free_buffer_wake(b); + + atomic_long_dec(&c->need_shrink); + freed++; cond_resched(); } } @@ -1730,8 +2395,7 @@ static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); - unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) + - READ_ONCE(c->n_buffers[LIST_DIRTY]); + unsigned long count = cache_total(&c->cache); unsigned long retain_target = get_retain_buffers(c); unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink); @@ -1758,8 +2422,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign unsigned int flags) { int r; + unsigned int num_locks; struct dm_bufio_client *c; - unsigned int i; char slab_name[27]; if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) { @@ -1768,12 +2432,13 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign goto bad_client; } - c = kzalloc(sizeof(*c), GFP_KERNEL); + num_locks = dm_num_hash_locks(); + c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL); if (!c) { r = -ENOMEM; goto bad_client; } - c->buffer_tree = RB_ROOT; + cache_init(&c->cache, num_locks); c->bdev = bdev; c->block_size = block_size; @@ -1790,11 +2455,6 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign static_branch_inc(&no_sleep_enabled); } - for (i = 0; i < LIST_SIZE; i++) { - INIT_LIST_HEAD(&c->lru[i]); - c->n_buffers[i] = 0; - } - mutex_init(&c->lock); spin_lock_init(&c->spinlock); INIT_LIST_HEAD(&c->reserved_buffers); @@ -1866,9 +2526,9 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign bad: while (!list_empty(&c->reserved_buffers)) { - struct dm_buffer *b = list_entry(c->reserved_buffers.next, - struct dm_buffer, lru_list); - list_del(&b->lru_list); + struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); + + list_del(&b->lru.list); free_buffer(b); } kmem_cache_destroy(c->slab_cache); 
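The dm-bufio hunks above replace the single client-wide lock, rbtree and LRU lists with a hash-locked cache whose policy is supplied by callbacks: __scan() passes is_clean/is_dirty to cache_evict(), dm_bufio_forget_buffers() passes idle to cache_remove_range(), and (below) evict_old_buffers() passes select_for_evict; each callback answers ER_EVICT, ER_DONT_EVICT or ER_STOP. A minimal userspace sketch of that eviction protocol over an oldest-first list — the types and single-list layout are simplified assumptions, not dm-bufio's actual cache:

#include <stdbool.h>
#include <stddef.h>

enum evict_result { ER_EVICT, ER_DONT_EVICT, ER_STOP };

struct buffer {
	struct buffer *next;
	int hold_count;
	bool dirty;
};

typedef enum evict_result (*b_predicate)(struct buffer *b, void *context);

/* Walk oldest-first; unlink and return the first buffer the predicate
 * accepts, or give up early when it answers ER_STOP. */
static struct buffer *cache_evict(struct buffer **head, b_predicate pred, void *context)
{
	struct buffer **p = head;

	while (*p) {
		switch (pred(*p, context)) {
		case ER_EVICT: {
			struct buffer *victim = *p;

			*p = victim->next; /* unlink the victim */
			return victim;
		}
		case ER_DONT_EVICT:
			p = &(*p)->next; /* held or busy: leave it, keep walking */
			break;
		case ER_STOP:
			return NULL; /* age-ordered list: nothing older remains */
		}
	}
	return NULL;
}

/* Stand-in for the is_clean predicate above: only idle, clean buffers go. */
static enum evict_result is_clean_idle(struct buffer *b, void *context)
{
	(void)context;
	return (b->hold_count || b->dirty) ? ER_DONT_EVICT : ER_EVICT;
}

dm_bufio_write_dirty_buffers() uses the same shape read-only: lru_iter_next() with the is_writing predicate, plus a private hold (__cache_inc_buffer) on each candidate so the client lock can be dropped while waiting on B_WRITING.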
@@ -1905,23 +2565,23 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c) mutex_unlock(&dm_bufio_clients_lock); - BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree)); - BUG_ON(c->need_reserved_buffers); + WARN_ON(c->need_reserved_buffers); while (!list_empty(&c->reserved_buffers)) { - struct dm_buffer *b = list_entry(c->reserved_buffers.next, - struct dm_buffer, lru_list); - list_del(&b->lru_list); + struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); + + list_del(&b->lru.list); free_buffer(b); } for (i = 0; i < LIST_SIZE; i++) - if (c->n_buffers[i]) - DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]); + if (cache_count(&c->cache, i)) + DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i)); for (i = 0; i < LIST_SIZE; i++) - BUG_ON(c->n_buffers[i]); + WARN_ON(cache_count(&c->cache, i)); + cache_destroy(&c->cache); kmem_cache_destroy(c->slab_cache); kmem_cache_destroy(c->slab_buffer); dm_io_client_destroy(c->dm_io); @@ -1938,6 +2598,8 @@ void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start) } EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset); +/*--------------------------------------------------------------*/ + static unsigned int get_max_age_hz(void) { unsigned int max_age = READ_ONCE(dm_bufio_max_age); @@ -1950,13 +2612,74 @@ static unsigned int get_max_age_hz(void) static bool older_than(struct dm_buffer *b, unsigned long age_hz) { - return time_after_eq(jiffies, b->last_accessed + age_hz); + return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz); +} + +struct evict_params { + gfp_t gfp; + unsigned long age_hz; + + /* + * This gets updated with the largest last_accessed (ie. most + * recently used) of the evicted buffers. It will not be reinitialised + * by __evict_many(), so you can use it across multiple invocations. + */ + unsigned long last_accessed; +}; + +/* + * We may not be able to evict this buffer if IO pending or the client + * is still using it. + * + * And if GFP_NOFS is used, we must not do any I/O because we hold + * dm_bufio_clients_lock and we would risk deadlock if the I/O gets + * rerouted to different bufio client. + */ +static enum evict_result select_for_evict(struct dm_buffer *b, void *context) +{ + struct evict_params *params = context; + + if (!(params->gfp & __GFP_FS) || + (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) { + if (test_bit_acquire(B_READING, &b->state) || + test_bit(B_WRITING, &b->state) || + test_bit(B_DIRTY, &b->state)) + return ER_DONT_EVICT; + } + + return older_than(b, params->age_hz) ? 
ER_EVICT : ER_STOP; } -static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) +static unsigned long __evict_many(struct dm_bufio_client *c, + struct evict_params *params, + int list_mode, unsigned long max_count) { - struct dm_buffer *b, *tmp; - unsigned long retain_target = get_retain_buffers(c); + unsigned long count; + unsigned long last_accessed; + struct dm_buffer *b; + + for (count = 0; count < max_count; count++) { + b = cache_evict(&c->cache, list_mode, select_for_evict, params); + if (!b) + break; + + last_accessed = READ_ONCE(b->last_accessed); + if (time_after_eq(params->last_accessed, last_accessed)) + params->last_accessed = last_accessed; + + __make_buffer_clean(b); + __free_buffer_wake(b); + + cond_resched(); + } + + return count; +} + +static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) +{ + struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0}; + unsigned long retain = get_retain_buffers(c); unsigned long count; LIST_HEAD(write_list); @@ -1969,112 +2692,135 @@ static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) dm_bufio_lock(c); } - count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY]; - list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) { - if (count <= retain_target) - break; - - if (!older_than(b, age_hz)) - break; - - if (__try_evict_buffer(b, 0)) - count--; - - cond_resched(); - } + count = cache_total(&c->cache); + if (count > retain) + __evict_many(c, ¶ms, LIST_CLEAN, count - retain); dm_bufio_unlock(c); } -static void do_global_cleanup(struct work_struct *w) +static void cleanup_old_buffers(void) { - struct dm_bufio_client *locked_client = NULL; - struct dm_bufio_client *current_client; - struct dm_buffer *b; - unsigned int spinlock_hold_count; - unsigned long threshold = dm_bufio_cache_size - - dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO; - unsigned long loops = global_num * 2; + unsigned long max_age_hz = get_max_age_hz(); + struct dm_bufio_client *c; mutex_lock(&dm_bufio_clients_lock); - while (1) { - cond_resched(); + __cache_size_refresh(); - spin_lock(&global_spinlock); - if (unlikely(dm_bufio_current_allocated <= threshold)) - break; + list_for_each_entry(c, &dm_bufio_all_clients, client_list) + evict_old_buffers(c, max_age_hz); - spinlock_hold_count = 0; -get_next: - if (!loops--) - break; - if (unlikely(list_empty(&global_queue))) - break; - b = list_entry(global_queue.prev, struct dm_buffer, global_list); - - if (b->accessed) { - b->accessed = 0; - list_move(&b->global_list, &global_queue); - if (likely(++spinlock_hold_count < 16)) - goto get_next; - spin_unlock(&global_spinlock); - continue; - } + mutex_unlock(&dm_bufio_clients_lock); +} - current_client = b->c; - if (unlikely(current_client != locked_client)) { - if (locked_client) - dm_bufio_unlock(locked_client); +static void work_fn(struct work_struct *w) +{ + cleanup_old_buffers(); - if (!dm_bufio_trylock(current_client)) { - spin_unlock(&global_spinlock); - dm_bufio_lock(current_client); - locked_client = current_client; - continue; - } + queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work, + DM_BUFIO_WORK_TIMER_SECS * HZ); +} - locked_client = current_client; - } +/*--------------------------------------------------------------*/ - spin_unlock(&global_spinlock); +/* + * Global cleanup tries to evict the oldest buffers from across _all_ + * the clients. It does this by repeatedly evicting a few buffers from + * the client that holds the oldest buffer. 
It's approximate, but hopefully + * good enough. + */ +static struct dm_bufio_client *__pop_client(void) +{ + struct list_head *h; - if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) { - spin_lock(&global_spinlock); - list_move(&b->global_list, &global_queue); - spin_unlock(&global_spinlock); - } + if (list_empty(&dm_bufio_all_clients)) + return NULL; + + h = dm_bufio_all_clients.next; + list_del(h); + return container_of(h, struct dm_bufio_client, client_list); +} + +/* + * Inserts the client in the global client list based on its + * 'oldest_buffer' field. + */ +static void __insert_client(struct dm_bufio_client *new_client) +{ + struct dm_bufio_client *c; + struct list_head *h = dm_bufio_all_clients.next; + + while (h != &dm_bufio_all_clients) { + c = container_of(h, struct dm_bufio_client, client_list); + if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer)) + break; + h = h->next; } - spin_unlock(&global_spinlock); + list_add_tail(&new_client->client_list, h); +} - if (locked_client) - dm_bufio_unlock(locked_client); +static unsigned long __evict_a_few(unsigned long nr_buffers) +{ + unsigned long count; + struct dm_bufio_client *c; + struct evict_params params = { + .gfp = GFP_KERNEL, + .age_hz = 0, + /* set to jiffies in case there are no buffers in this client */ + .last_accessed = jiffies + }; - mutex_unlock(&dm_bufio_clients_lock); + c = __pop_client(); + if (!c) + return 0; + + dm_bufio_lock(c); + count = __evict_many(c, ¶ms, LIST_CLEAN, nr_buffers); + dm_bufio_unlock(c); + + if (count) + c->oldest_buffer = params.last_accessed; + __insert_client(c); + + return count; } -static void cleanup_old_buffers(void) +static void check_watermarks(void) { - unsigned long max_age_hz = get_max_age_hz(); + LIST_HEAD(write_list); struct dm_bufio_client *c; mutex_lock(&dm_bufio_clients_lock); + list_for_each_entry(c, &dm_bufio_all_clients, client_list) { + dm_bufio_lock(c); + __check_watermark(c, &write_list); + dm_bufio_unlock(c); + } + mutex_unlock(&dm_bufio_clients_lock); - __cache_size_refresh(); + __flush_write_list(&write_list); +} - list_for_each_entry(c, &dm_bufio_all_clients, client_list) - __evict_old_buffers(c, max_age_hz); +static void evict_old(void) +{ + unsigned long threshold = dm_bufio_cache_size - + dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO; + mutex_lock(&dm_bufio_clients_lock); + while (dm_bufio_current_allocated > threshold) { + if (!__evict_a_few(64)) + break; + cond_resched(); + } mutex_unlock(&dm_bufio_clients_lock); } -static void work_fn(struct work_struct *w) +static void do_global_cleanup(struct work_struct *w) { - cleanup_old_buffers(); - - queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work, - DM_BUFIO_WORK_TIMER_SECS * HZ); + check_watermarks(); + evict_old(); } /* @@ -2159,7 +2905,7 @@ static void __exit dm_bufio_exit(void) bug = 1; } - BUG_ON(bug); + WARN_ON(bug); /* leaks are not worth crashing the system */ } module_init(dm_bufio_init) diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index dbbcfa580078..872896218550 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -3459,7 +3459,6 @@ static int __init dm_cache_init(void) r = dm_register_target(&cache_target); if (r) { - DMERR("cache target registration failed: %d", r); kmem_cache_destroy(migration_cache); return r; } diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index f38a27604c7a..f467cdb5a022 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -2204,7 +2204,7 @@ static int 
__init dm_clone_init(void) r = dm_register_target(&clone_target); if (r < 0) { - DMERR("Failed to register clone target"); + kmem_cache_destroy(_hydration_cache); return r; } diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 3ba53dc3cc3f..8b47b913ee83 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -3659,25 +3659,7 @@ static struct target_type crypt_target = { .iterate_devices = crypt_iterate_devices, .io_hints = crypt_io_hints, }; - -static int __init dm_crypt_init(void) -{ - int r; - - r = dm_register_target(&crypt_target); - if (r < 0) - DMERR("register failed %d", r); - - return r; -} - -static void __exit dm_crypt_exit(void) -{ - dm_unregister_target(&crypt_target); -} - -module_init(dm_crypt_init); -module_exit(dm_crypt_exit); +module_dm(crypt); MODULE_AUTHOR("Jana Saout <jana@saout.de>"); MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index a425046f88c7..7433525e5985 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -367,31 +367,7 @@ static struct target_type delay_target = { .status = delay_status, .iterate_devices = delay_iterate_devices, }; - -static int __init dm_delay_init(void) -{ - int r; - - r = dm_register_target(&delay_target); - if (r < 0) { - DMERR("register failed %d", r); - goto bad_register; - } - - return 0; - -bad_register: - return r; -} - -static void __exit dm_delay_exit(void) -{ - dm_unregister_target(&delay_target); -} - -/* Module hooks */ -module_init(dm_delay_init); -module_exit(dm_delay_exit); +module_dm(delay); MODULE_DESCRIPTION(DM_NAME " delay target"); MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>"); diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c index 7ae9936752de..12a377e06d02 100644 --- a/drivers/md/dm-dust.c +++ b/drivers/md/dm-dust.c @@ -570,24 +570,7 @@ static struct target_type dust_target = { .status = dust_status, .prepare_ioctl = dust_prepare_ioctl, }; - -static int __init dm_dust_init(void) -{ - int r = dm_register_target(&dust_target); - - if (r < 0) - DMERR("dm_register_target failed %d", r); - - return r; -} - -static void __exit dm_dust_exit(void) -{ - dm_unregister_target(&dust_target); -} - -module_init(dm_dust_init); -module_exit(dm_dust_exit); +module_dm(dust); MODULE_DESCRIPTION(DM_NAME " dust test target"); MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>"); diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c index b1068a68bc46..435b45201f4d 100644 --- a/drivers/md/dm-ebs-target.c +++ b/drivers/md/dm-ebs-target.c @@ -452,24 +452,7 @@ static struct target_type ebs_target = { .prepare_ioctl = ebs_prepare_ioctl, .iterate_devices = ebs_iterate_devices, }; - -static int __init dm_ebs_init(void) -{ - int r = dm_register_target(&ebs_target); - - if (r < 0) - DMERR("register failed %d", r); - - return r; -} - -static void dm_ebs_exit(void) -{ - dm_unregister_target(&ebs_target); -} - -module_init(dm_ebs_init); -module_exit(dm_ebs_exit); +module_dm(ebs); MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>"); MODULE_DESCRIPTION(DM_NAME " emulated block size target"); diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index c2e7780cdd2d..0d70914217ee 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1753,27 +1753,7 @@ static struct target_type era_target = { .iterate_devices = era_iterate_devices, .io_hints = era_io_hints }; - -static int __init dm_era_init(void) -{ - int r; - - r = dm_register_target(&era_target); - if 
(r) { - DMERR("era target registration failed: %d", r); - return r; - } - - return 0; -} - -static void __exit dm_era_exit(void) -{ - dm_unregister_target(&era_target); -} - -module_init(dm_era_init); -module_exit(dm_era_exit); +module_dm(era); MODULE_DESCRIPTION(DM_NAME " era target"); MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>"); diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 5b7556d2a9d9..bd80bcafbe50 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -37,6 +37,7 @@ struct flakey_c { }; enum feature_flag_bits { + ERROR_READS, DROP_WRITES, ERROR_WRITES }; @@ -53,7 +54,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, const char *arg_name; static const struct dm_arg _args[] = { - {0, 6, "Invalid number of feature args"}, + {0, 7, "Invalid number of feature args"}, {1, UINT_MAX, "Invalid corrupt bio byte"}, {0, 255, "Invalid corrupt value to write into bio byte (0-255)"}, {0, UINT_MAX, "Invalid corrupt bio flags mask"}, @@ -77,6 +78,17 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, } /* + * error_reads + */ + if (!strcasecmp(arg_name, "error_reads")) { + if (test_and_set_bit(ERROR_READS, &fc->flags)) { + ti->error = "Feature error_reads duplicated"; + return -EINVAL; + } + continue; + } + + /* * drop_writes */ if (!strcasecmp(arg_name, "drop_writes")) { @@ -125,9 +137,9 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, * Direction r or w? */ arg_name = dm_shift_arg(as); - if (!strcasecmp(arg_name, "w")) + if (arg_name && !strcasecmp(arg_name, "w")) fc->corrupt_bio_rw = WRITE; - else if (!strcasecmp(arg_name, "r")) + else if (arg_name && !strcasecmp(arg_name, "r")) fc->corrupt_bio_rw = READ; else { ti->error = "Invalid corrupt bio direction (r or w)"; @@ -171,6 +183,12 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, return -EINVAL; } + if (!fc->corrupt_bio_byte && !test_bit(ERROR_READS, &fc->flags) && + !test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags)) { + set_bit(ERROR_WRITES, &fc->flags); + set_bit(ERROR_READS, &fc->flags); + } + return 0; } @@ -346,8 +364,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) * Otherwise, flakey_end_io() will decide if the reads should be modified. */ if (bio_data_dir(bio) == READ) { - if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) && - !test_bit(ERROR_WRITES, &fc->flags)) + if (test_bit(ERROR_READS, &fc->flags)) return DM_MAPIO_KILL; goto map_bio; } @@ -373,11 +390,6 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) } goto map_bio; } - - /* - * By default, error all I/O. - */ - return DM_MAPIO_KILL; } map_bio: @@ -404,8 +416,8 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, */ corrupt_bio_data(bio, fc); } - } else if (!test_bit(DROP_WRITES, &fc->flags) && - !test_bit(ERROR_WRITES, &fc->flags)) { + } + if (test_bit(ERROR_READS, &fc->flags)) { /* * Error read during the down_interval if drop_writes * and error_writes were not configured. 
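A recurring cleanup in this series converts targets whose module init/exit did nothing but register and unregister a single target_type to one module_dm() line (crypt, delay, dust, ebs, era above; flakey, log-writes, raid, switch, unstripe, verity, writecache, zero, zoned below). A hedged guess at the shape of such a helper — token pasting from the target name, with userspace stubs standing in for the kernel registration API so the sketch compiles standalone:

#include <stdio.h>

struct target_type { const char *name; };

/* Userspace stubs for the kernel's registration calls. */
static int dm_register_target(struct target_type *tt)
{
	printf("registered %s\n", tt->name);
	return 0;
}

static void dm_unregister_target(struct target_type *tt)
{
	printf("unregistered %s\n", tt->name);
}

/* module_dm(foo) expects a foo_target to exist; the real helper
 * presumably also wires up module_init()/module_exit(). */
#define module_dm(name) \
static int dm_##name##_init(void) \
{ return dm_register_target(&name##_target); } \
static void dm_##name##_exit(void) \
{ dm_unregister_target(&name##_target); }

static struct target_type flakey_target = { .name = "flakey" };
module_dm(flakey)

int main(void)
{
	dm_flakey_init();
	dm_flakey_exit();
	return 0;
}

Error reporting moves with it: dm_register_target() now prints its own DMERR on failure (see the dm-target.c hunk below), so the per-target "register failed" messages become redundant and are dropped along with the boilerplate.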
@@ -422,7 +434,7 @@ static void flakey_status(struct dm_target *ti, status_type_t type, { unsigned int sz = 0; struct flakey_c *fc = ti->private; - unsigned int drop_writes, error_writes; + unsigned int error_reads, drop_writes, error_writes; switch (type) { case STATUSTYPE_INFO: @@ -430,21 +442,24 @@ static void flakey_status(struct dm_target *ti, status_type_t type, break; case STATUSTYPE_TABLE: - DMEMIT("%s %llu %u %u ", fc->dev->name, + DMEMIT("%s %llu %u %u", fc->dev->name, (unsigned long long)fc->start, fc->up_interval, fc->down_interval); + error_reads = test_bit(ERROR_READS, &fc->flags); drop_writes = test_bit(DROP_WRITES, &fc->flags); error_writes = test_bit(ERROR_WRITES, &fc->flags); - DMEMIT("%u ", drop_writes + error_writes + (fc->corrupt_bio_byte > 0) * 5); + DMEMIT(" %u", error_reads + drop_writes + error_writes + (fc->corrupt_bio_byte > 0) * 5); + if (error_reads) + DMEMIT(" error_reads"); if (drop_writes) - DMEMIT("drop_writes "); + DMEMIT(" drop_writes"); else if (error_writes) - DMEMIT("error_writes "); + DMEMIT(" error_writes"); if (fc->corrupt_bio_byte) - DMEMIT("corrupt_bio_byte %u %c %u %u ", + DMEMIT(" corrupt_bio_byte %u %c %u %u", fc->corrupt_bio_byte, (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r', fc->corrupt_bio_value, fc->corrupt_bio_flags); @@ -506,25 +521,7 @@ static struct target_type flakey_target = { .prepare_ioctl = flakey_prepare_ioctl, .iterate_devices = flakey_iterate_devices, }; - -static int __init dm_flakey_init(void) -{ - int r = dm_register_target(&flakey_target); - - if (r < 0) - DMERR("register failed %d", r); - - return r; -} - -static void __exit dm_flakey_exit(void) -{ - dm_unregister_target(&flakey_target); -} - -/* Module hooks */ -module_init(dm_flakey_init); -module_exit(dm_flakey_exit); +module_dm(flakey); MODULE_DESCRIPTION(DM_NAME " flakey target"); MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index b0d5057fbdd9..31838b13ea54 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -3118,7 +3118,7 @@ static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, voi static void dm_integrity_postsuspend(struct dm_target *ti) { - struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; + struct dm_integrity_c *ic = ti->private; int r; WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier)); @@ -3167,7 +3167,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti) static void dm_integrity_resume(struct dm_target *ti) { - struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; + struct dm_integrity_c *ic = ti->private; __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors); int r; @@ -3290,7 +3290,7 @@ static void dm_integrity_resume(struct dm_target *ti) static void dm_integrity_status(struct dm_target *ti, status_type_t type, unsigned int status_flags, char *result, unsigned int maxlen) { - struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; + struct dm_integrity_c *ic = ti->private; unsigned int arg_count; size_t sz = 0; @@ -4703,11 +4703,12 @@ static int __init dm_integrity_init(void) } r = dm_register_target(&integrity_target); + if (r < 0) { + kmem_cache_destroy(journal_io_cache); + return r; + } - if (r < 0) - DMERR("register failed %d", r); - - return r; + return 0; } static void __exit dm_integrity_exit(void) diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index dc2df76999b0..f053ce245814 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -187,7 +187,7 @@ 
static void list_get_page(struct dpages *dp, struct page **p, unsigned long *len, unsigned int *offset) { unsigned int o = dp->context_u; - struct page_list *pl = (struct page_list *) dp->context_ptr; + struct page_list *pl = dp->context_ptr; *p = pl->page; *len = PAGE_SIZE - o; @@ -196,7 +196,7 @@ static void list_get_page(struct dpages *dp, static void list_next_page(struct dpages *dp) { - struct page_list *pl = (struct page_list *) dp->context_ptr; + struct page_list *pl = dp->context_ptr; dp->context_ptr = pl->next; dp->context_u = 0; diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 50a1259294d1..cc77cf3d4109 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1168,10 +1168,13 @@ static int do_resume(struct dm_ioctl *param) /* Do we need to load a new map ? */ if (new_map) { sector_t old_size, new_size; + int srcu_idx; /* Suspend if it isn't already suspended */ - if (param->flags & DM_SKIP_LOCKFS_FLAG) + old_map = dm_get_live_table(md, &srcu_idx); + if ((param->flags & DM_SKIP_LOCKFS_FLAG) || !old_map) suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; + dm_put_live_table(md, srcu_idx); if (param->flags & DM_NOFLUSH_FLAG) suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; if (!dm_suspended_md(md)) @@ -1556,11 +1559,12 @@ static int table_clear(struct file *filp, struct dm_ioctl *param, size_t param_s has_new_map = true; } - param->flags &= ~DM_INACTIVE_PRESENT_FLAG; - - __dev_status(hc->md, param); md = hc->md; up_write(&_hash_lock); + + param->flags &= ~DM_INACTIVE_PRESENT_FLAG; + __dev_status(md, param); + if (old_map) { dm_sync_table(md); dm_table_destroy(old_map); diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index a158c6e5fbd7..d01807c50f20 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -519,7 +519,7 @@ static int run_complete_job(struct kcopyd_job *job) static void complete_io(unsigned long error, void *context) { - struct kcopyd_job *job = (struct kcopyd_job *) context; + struct kcopyd_job *job = context; struct dm_kcopyd_client *kc = job->kc; io_job_finish(kc->throttle); @@ -696,7 +696,7 @@ static void segment_complete(int read_err, unsigned long write_err, /* FIXME: tidy this function */ sector_t progress = 0; sector_t count = 0; - struct kcopyd_job *sub_job = (struct kcopyd_job *) context; + struct kcopyd_job *sub_job = context; struct kcopyd_job *job = sub_job->master_job; struct dm_kcopyd_client *kc = job->kc; diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 3e622dcc9dbd..f4448d520ee9 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -72,7 +72,7 @@ bad: static void linear_dtr(struct dm_target *ti) { - struct linear_c *lc = (struct linear_c *) ti->private; + struct linear_c *lc = ti->private; dm_put_device(ti, lc->dev); kfree(lc); @@ -98,7 +98,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio) static void linear_status(struct dm_target *ti, status_type_t type, unsigned int status_flags, char *result, unsigned int maxlen) { - struct linear_c *lc = (struct linear_c *) ti->private; + struct linear_c *lc = ti->private; size_t sz = 0; switch (type) { @@ -120,7 +120,7 @@ static void linear_status(struct dm_target *ti, status_type_t type, static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) { - struct linear_c *lc = (struct linear_c *) ti->private; + struct linear_c *lc = ti->private; struct dm_dev *dev = lc->dev; *bdev = dev->bdev; diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index cbd0f81f4a35..f17a6cf2284e 100644 --- 
a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -429,7 +429,7 @@ static inline sector_t logdev_last_sector(struct log_writes_c *lc) static int log_writes_kthread(void *arg) { - struct log_writes_c *lc = (struct log_writes_c *)arg; + struct log_writes_c *lc = arg; sector_t sector = 0; while (!kthread_should_stop()) { @@ -937,24 +937,7 @@ static struct target_type log_writes_target = { .dax_zero_page_range = log_writes_dax_zero_page_range, .dax_recovery_write = log_writes_dax_recovery_write, }; - -static int __init dm_log_writes_init(void) -{ - int r = dm_register_target(&log_writes_target); - - if (r < 0) - DMERR("register failed %d", r); - - return r; -} - -static void __exit dm_log_writes_exit(void) -{ - dm_unregister_target(&log_writes_target); -} - -module_init(dm_log_writes_init); -module_exit(dm_log_writes_exit); +module_dm(log_writes); MODULE_DESCRIPTION(DM_NAME " log writes target"); MODULE_AUTHOR("Josef Bacik <jbacik@fb.com>"); diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index afd94d2e7295..f9f84236dfcd 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -530,7 +530,7 @@ static void destroy_log_context(struct log_c *lc) static void core_dtr(struct dm_dirty_log *log) { - struct log_c *lc = (struct log_c *) log->context; + struct log_c *lc = log->context; vfree(lc->clean_bits); destroy_log_context(lc); @@ -569,7 +569,7 @@ static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti, static void disk_dtr(struct dm_dirty_log *log) { - struct log_c *lc = (struct log_c *) log->context; + struct log_c *lc = log->context; dm_put_device(lc->ti, lc->log_dev); vfree(lc->disk_header); @@ -590,7 +590,7 @@ static int disk_resume(struct dm_dirty_log *log) { int r; unsigned int i; - struct log_c *lc = (struct log_c *) log->context; + struct log_c *lc = log->context; size_t size = lc->bitset_uint32_count * sizeof(uint32_t); /* read the disk header */ @@ -652,14 +652,14 @@ static int disk_resume(struct dm_dirty_log *log) static uint32_t core_get_region_size(struct dm_dirty_log *log) { - struct log_c *lc = (struct log_c *) log->context; + struct log_c *lc = log->context; return lc->region_size; } static int core_resume(struct dm_dirty_log *log) { - struct log_c *lc = (struct log_c *) log->context; + struct log_c *lc = log->context; lc->sync_search = 0; return 0; @@ -667,14 +667,14 @@ static int core_resume(struct dm_dirty_log *log) static int core_is_clean(struct dm_dirty_log *log, region_t region) { - struct log_c *lc = (struct log_c *) log->context; + struct log_c *lc = log->context; return log_test_bit(lc->clean_bits, region); } static int core_in_sync(struct dm_dirty_log *log, region_t region, int block) { - struct log_c *lc = (struct log_c *) log->context; + struct log_c *lc = log->context; return log_test_bit(lc->sync_bits, region); } @@ -727,14 +727,14 @@ static int disk_flush(struct dm_dirty_log *log) static void core_mark_region(struct dm_dirty_log *log, region_t region) { - struct log_c *lc = (struct log_c *) log->context; + struct log_c *lc = log->context; log_clear_bit(lc, lc->clean_bits, region); } static void core_clear_region(struct dm_dirty_log *log, region_t region) { - struct log_c *lc = (struct log_c *) log->context; + struct log_c *lc = log->context; if (likely(!lc->flush_failed)) log_set_bit(lc, lc->clean_bits, region); @@ -742,7 +742,7 @@ static void core_clear_region(struct dm_dirty_log *log, region_t region) static int core_get_resync_work(struct dm_dirty_log *log, region_t *region) { - struct log_c *lc = (struct log_c *) 
log->context; + struct log_c *lc = log->context; if (lc->sync_search >= lc->region_count) return 0; @@ -765,7 +765,7 @@ static int core_get_resync_work(struct dm_dirty_log *log, region_t *region) static void core_set_region_sync(struct dm_dirty_log *log, region_t region, int in_sync) { - struct log_c *lc = (struct log_c *) log->context; + struct log_c *lc = log->context; log_clear_bit(lc, lc->recovering_bits, region); if (in_sync) { @@ -779,7 +779,7 @@ static void core_set_region_sync(struct dm_dirty_log *log, region_t region, static region_t core_get_sync_count(struct dm_dirty_log *log) { - struct log_c *lc = (struct log_c *) log->context; + struct log_c *lc = log->context; return lc->sync_count; } diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 61ab1a8d2c9c..bea3cda9938e 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -2235,11 +2235,8 @@ static int __init dm_multipath_init(void) } r = dm_register_target(&multipath_target); - if (r < 0) { - DMERR("request-based register failed %d", r); - r = -EINVAL; + if (r < 0) goto bad_register_target; - } return 0; diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 60632b409b80..c8821fcb8299 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -2209,7 +2209,6 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) { int role; - unsigned int d; struct mddev *mddev = &rs->md; uint64_t events_sb; uint64_t failed_devices[DISKS_ARRAY_ELEMS]; @@ -2324,7 +2323,6 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) * to provide capacity for redundancy or during reshape * to add capacity to grow the raid set. */ - d = 0; rdev_for_each(r, mddev) { if (test_bit(Journal, &rdev->flags)) continue; @@ -2340,8 +2338,6 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) if (test_bit(FirstUse, &r->flags)) rebuild_and_new++; } - - d++; } if (new_devs == rs->raid_disks || !rebuilds) { @@ -4081,23 +4077,7 @@ static struct target_type raid_target = { .preresume = raid_preresume, .resume = raid_resume, }; - -static int __init dm_raid_init(void) -{ - DMINFO("Loading target version %u.%u.%u", - raid_target.version[0], - raid_target.version[1], - raid_target.version[2]); - return dm_register_target(&raid_target); -} - -static void __exit dm_raid_exit(void) -{ - dm_unregister_target(&raid_target); -} - -module_init(dm_raid_init); -module_exit(dm_raid_exit); +module_dm(raid); module_param(devices_handle_discard_safely, bool, 0644); MODULE_PARM_DESC(devices_handle_discard_safely, diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index bc417a5e5b89..ddcb2bc4a617 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -604,7 +604,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) static void write_callback(unsigned long error, void *context) { unsigned int i; - struct bio *bio = (struct bio *) context; + struct bio *bio = context; struct mirror_set *ms; int should_wake = 0; unsigned long flags; @@ -1180,7 +1180,7 @@ err_free_context: static void mirror_dtr(struct dm_target *ti) { - struct mirror_set *ms = (struct mirror_set *) ti->private; + struct mirror_set *ms = ti->private; del_timer_sync(&ms->timer); flush_workqueue(ms->kmirrord_wq); @@ -1246,7 +1246,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error) { int rw = bio_data_dir(bio); - struct mirror_set *ms = (struct mirror_set *) ti->private; + struct 
mirror_set *ms = ti->private; struct mirror *m = NULL; struct dm_bio_details *bd = NULL; struct dm_raid1_bio_record *bio_record = @@ -1311,7 +1311,7 @@ out: static void mirror_presuspend(struct dm_target *ti) { - struct mirror_set *ms = (struct mirror_set *) ti->private; + struct mirror_set *ms = ti->private; struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); struct bio_list holds; @@ -1407,7 +1407,7 @@ static void mirror_status(struct dm_target *ti, status_type_t type, { unsigned int m, sz = 0; int num_feature_args = 0; - struct mirror_set *ms = (struct mirror_set *) ti->private; + struct mirror_set *ms = ti->private; struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); char buffer[MAX_NR_MIRRORS + 1]; @@ -1498,23 +1498,21 @@ static struct target_type mirror_target = { static int __init dm_mirror_init(void) { - int r = -ENOMEM; + int r; dm_raid1_wq = alloc_workqueue("dm_raid1_wq", 0, 0); - if (!dm_raid1_wq) - goto bad_target; + if (!dm_raid1_wq) { + DMERR("Failed to alloc workqueue"); + return -ENOMEM; + } r = dm_register_target(&mirror_target); if (r < 0) { destroy_workqueue(dm_raid1_wq); - goto bad_target; + return r; } return 0; - -bad_target: - DMERR("Failed to register mirror target"); - return r; } static void __exit dm_mirror_exit(void) diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index f14e5df27874..15649921f2a9 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -567,7 +567,7 @@ ret_destroy_bufio: static struct pstore *get_info(struct dm_exception_store *store) { - return (struct pstore *) store->context; + return store->context; } static void persistent_usage(struct dm_exception_store *store, diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index f766c21408f1..9c49f53760d0 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -2815,22 +2815,16 @@ static int __init dm_snapshot_init(void) } r = dm_register_target(&snapshot_target); - if (r < 0) { - DMERR("snapshot target register failed %d", r); + if (r < 0) goto bad_register_snapshot_target; - } r = dm_register_target(&origin_target); - if (r < 0) { - DMERR("Origin target register failed %d", r); + if (r < 0) goto bad_register_origin_target; - } r = dm_register_target(&merge_target); - if (r < 0) { - DMERR("Merge target register failed %d", r); + if (r < 0) goto bad_register_merge_target; - } return 0; diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 8d6951157106..e2854a3cbd28 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -189,7 +189,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) static void stripe_dtr(struct dm_target *ti) { unsigned int i; - struct stripe_c *sc = (struct stripe_c *) ti->private; + struct stripe_c *sc = ti->private; for (i = 0; i < sc->stripes; i++) dm_put_device(ti, sc->stripe[i].dev); @@ -360,7 +360,7 @@ static size_t stripe_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff, static void stripe_status(struct dm_target *ti, status_type_t type, unsigned int status_flags, char *result, unsigned int maxlen) { - struct stripe_c *sc = (struct stripe_c *) ti->private; + struct stripe_c *sc = ti->private; unsigned int sz = 0; unsigned int i; diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index ee2432927e90..dfd9fb52a6f3 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c @@ -565,25 +565,7 @@ static struct target_type switch_target = { .prepare_ioctl = switch_prepare_ioctl, .iterate_devices = switch_iterate_devices, }; - -static 
int __init dm_switch_init(void) -{ - int r; - - r = dm_register_target(&switch_target); - if (r < 0) - DMERR("dm_register_target() failed %d", r); - - return r; -} - -static void __exit dm_switch_exit(void) -{ - dm_unregister_target(&switch_target); -} - -module_init(dm_switch_init); -module_exit(dm_switch_exit); +module_dm(switch); MODULE_DESCRIPTION(DM_NAME " dynamic path switching target"); MODULE_AUTHOR("Kevin D. O'Kelley <Kevin_OKelley@dell.com>"); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 7899f5fb4c13..1398f1d6e83e 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1661,8 +1661,12 @@ int dm_calculate_queue_limits(struct dm_table *t, blk_set_stacking_limits(&ti_limits); - if (!ti->type->iterate_devices) + if (!ti->type->iterate_devices) { + /* Set I/O hints portion of queue limits */ + if (ti->type->io_hints) + ti->type->io_hints(ti, &ti_limits); goto combine_limits; + } /* * Combine queue limits of all the devices this target uses. diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c index 26ea22b1a0d7..27e2992ff249 100644 --- a/drivers/md/dm-target.c +++ b/drivers/md/dm-target.c @@ -85,12 +85,15 @@ int dm_register_target(struct target_type *tt) int rv = 0; down_write(&_lock); - if (__find_target_type(tt->name)) + if (__find_target_type(tt->name)) { + DMERR("%s: '%s' target already registered", + __func__, tt->name); rv = -EEXIST; - else + } else { list_add(&tt->list, &_targets); - + } up_write(&_lock); + return rv; } EXPORT_SYMBOL(dm_register_target); @@ -119,6 +122,7 @@ static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args) * Return error for discards instead of -EOPNOTSUPP */ tt->num_discard_bios = 1; + tt->discards_supported = true; return 0; } @@ -145,6 +149,13 @@ static void io_err_release_clone_rq(struct request *clone, { } +static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits) +{ + limits->max_discard_sectors = UINT_MAX; + limits->max_hw_discard_sectors = UINT_MAX; + limits->discard_granularity = 512; +} + static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, long nr_pages, enum dax_access_mode mode, void **kaddr, pfn_t *pfn) @@ -154,13 +165,14 @@ static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, static struct target_type error_target = { .name = "error", - .version = {1, 5, 0}, + .version = {1, 6, 0}, .features = DM_TARGET_WILDCARD, .ctr = io_err_ctr, .dtr = io_err_dtr, .map = io_err_map, .clone_and_map_rq = io_err_clone_and_map_rq, .release_clone_rq = io_err_release_clone_rq, + .io_hints = io_err_io_hints, .direct_access = io_err_dax_direct_access, }; diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 13d4677baafd..2b13c949bd72 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -118,25 +118,27 @@ enum lock_space { PHYSICAL }; -static void build_key(struct dm_thin_device *td, enum lock_space ls, +static bool build_key(struct dm_thin_device *td, enum lock_space ls, dm_block_t b, dm_block_t e, struct dm_cell_key *key) { key->virtual = (ls == VIRTUAL); key->dev = dm_thin_dev_id(td); key->block_begin = b; key->block_end = e; + + return dm_cell_key_has_valid_range(key); } static void build_data_key(struct dm_thin_device *td, dm_block_t b, struct dm_cell_key *key) { - build_key(td, PHYSICAL, b, b + 1llu, key); + (void) build_key(td, PHYSICAL, b, b + 1llu, key); } static void build_virtual_key(struct dm_thin_device *td, dm_block_t b, struct dm_cell_key *key) { - build_key(td, VIRTUAL, b, b + 1llu, key); + (void) 
build_key(td, VIRTUAL, b, b + 1llu, key); } /*----------------------------------------------------------------*/ @@ -883,15 +885,17 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c { struct pool *pool = tc->pool; unsigned long flags; - int has_work; + struct bio_list bios; - spin_lock_irqsave(&tc->lock, flags); - cell_release_no_holder(pool, cell, &tc->deferred_bio_list); - has_work = !bio_list_empty(&tc->deferred_bio_list); - spin_unlock_irqrestore(&tc->lock, flags); + bio_list_init(&bios); + cell_release_no_holder(pool, cell, &bios); - if (has_work) + if (!bio_list_empty(&bios)) { + spin_lock_irqsave(&tc->lock, flags); + bio_list_merge(&tc->deferred_bio_list, &bios); + spin_unlock_irqrestore(&tc->lock, flags); wake_worker(pool); + } } static void thin_defer_bio(struct thin_c *tc, struct bio *bio); @@ -1672,54 +1676,70 @@ static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t struct dm_cell_key data_key; struct dm_bio_prison_cell *data_cell; struct dm_thin_new_mapping *m; - dm_block_t virt_begin, virt_end, data_begin; + dm_block_t virt_begin, virt_end, data_begin, data_end; + dm_block_t len, next_boundary; while (begin != end) { - r = ensure_next_mapping(pool); - if (r) - /* we did our best */ - return; - r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end, &data_begin, &maybe_shared); - if (r) + if (r) { /* * Silently fail, letting any mappings we've * created complete. */ break; - - build_key(tc->td, PHYSICAL, data_begin, data_begin + (virt_end - virt_begin), &data_key); - if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) { - /* contention, we'll give up with this range */ - begin = virt_end; - continue; } - /* - * IO may still be going to the destination block. We must - * quiesce before we can do the removal. - */ - m = get_next_mapping(pool); - m->tc = tc; - m->maybe_shared = maybe_shared; - m->virt_begin = virt_begin; - m->virt_end = virt_end; - m->data_block = data_begin; - m->cell = data_cell; - m->bio = bio; + data_end = data_begin + (virt_end - virt_begin); /* - * The parent bio must not complete before sub discard bios are - * chained to it (see end_discard's bio_chain)! - * - * This per-mapping bi_remaining increment is paired with - * the implicit decrement that occurs via bio_endio() in - * end_discard(). + * Make sure the data region obeys the bio prison restrictions. */ - bio_inc_remaining(bio); - if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) - pool->process_prepared_discard(m); + while (data_begin < data_end) { + r = ensure_next_mapping(pool); + if (r) + return; /* we did our best */ + + next_boundary = ((data_begin >> BIO_PRISON_MAX_RANGE_SHIFT) + 1) + << BIO_PRISON_MAX_RANGE_SHIFT; + len = min_t(sector_t, data_end - data_begin, next_boundary - data_begin); + + /* This key is certainly within range given the above splitting */ + (void) build_key(tc->td, PHYSICAL, data_begin, data_begin + len, &data_key); + if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) { + /* contention, we'll give up with this range */ + data_begin += len; + continue; + } + + /* + * IO may still be going to the destination block. We must + * quiesce before we can do the removal. 
+ */ + m = get_next_mapping(pool); + m->tc = tc; + m->maybe_shared = maybe_shared; + m->virt_begin = virt_begin; + m->virt_end = virt_begin + len; + m->data_block = data_begin; + m->cell = data_cell; + m->bio = bio; + + /* + * The parent bio must not complete before sub discard bios are + * chained to it (see end_discard's bio_chain)! + * + * This per-mapping bi_remaining increment is paired with + * the implicit decrement that occurs via bio_endio() in + * end_discard(). + */ + bio_inc_remaining(bio); + if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) + pool->process_prepared_discard(m); + + virt_begin += len; + data_begin += len; + } begin = virt_end; } @@ -1761,8 +1781,13 @@ static void process_discard_bio(struct thin_c *tc, struct bio *bio) return; } - build_key(tc->td, VIRTUAL, begin, end, &virt_key); - if (bio_detain(tc->pool, &virt_key, bio, &virt_cell)) + if (unlikely(!build_key(tc->td, VIRTUAL, begin, end, &virt_key))) { + DMERR_LIMIT("Discard doesn't respect bio prison limits"); + bio_endio(bio); + return; + } + + if (bio_detain(tc->pool, &virt_key, bio, &virt_cell)) { /* * Potential starvation issue: We're relying on the * fs/application being well behaved, and not trying to @@ -1771,6 +1796,7 @@ static void process_discard_bio(struct thin_c *tc, struct bio *bio) * cell will never be granted. */ return; + } tc->pool->process_discard_cell(tc, virt_cell); } @@ -3378,13 +3404,13 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv) */ if (pf.discard_enabled && pf.discard_passdown) { ti->num_discard_bios = 1; - /* * Setting 'discards_supported' circumvents the normal * stacking of discard limits (this keeps the pool and * thin devices' discard limits consistent). */ ti->discards_supported = true; + ti->max_discard_granularity = true; } ti->private = pt; @@ -4094,7 +4120,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 22, 0}, + .version = {1, 23, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -4259,6 +4285,7 @@ static int thin_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (tc->pool->pf.discard_enabled) { ti->discards_supported = true; ti->num_discard_bios = 1; + ti->max_discard_granularity = true; } mutex_unlock(&dm_thin_pool_table.mutex); @@ -4474,12 +4501,12 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) return; limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; - limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ + limits->max_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE; } static struct target_type thin_target = { .name = "thin", - .version = {1, 22, 0}, + .version = {1, 23, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c index e7b7d5983a16..48587c16c445 100644 --- a/drivers/md/dm-unstripe.c +++ b/drivers/md/dm-unstripe.c @@ -192,19 +192,7 @@ static struct target_type unstripe_target = { .iterate_devices = unstripe_iterate_devices, .io_hints = unstripe_io_hints, }; - -static int __init dm_unstripe_init(void) -{ - return dm_register_target(&unstripe_target); -} - -static void __exit dm_unstripe_exit(void) -{ - dm_unregister_target(&unstripe_target); -} - -module_init(dm_unstripe_init); -module_exit(dm_unstripe_exit); +module_dm(unstripe); MODULE_DESCRIPTION(DM_NAME " unstriped target"); MODULE_ALIAS("dm-unstriped"); diff --git 
a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 962fc32c947c..a9ee2faa75a2 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -567,14 +567,14 @@ out: static void *fec_rs_alloc(gfp_t gfp_mask, void *pool_data) { - struct dm_verity *v = (struct dm_verity *)pool_data; + struct dm_verity *v = pool_data; return init_rs_gfp(8, 0x11d, 0, 1, v->fec->roots, gfp_mask); } static void fec_rs_free(void *element, void *pool_data) { - struct rs_control *rs = (struct rs_control *)element; + struct rs_control *rs = element; if (rs) free_rs(rs); diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index ade83ef3b439..e35c16e06d06 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -16,6 +16,7 @@ #include "dm-verity.h" #include "dm-verity-fec.h" #include "dm-verity-verify-sig.h" +#include "dm-audit.h" #include <linux/module.h> #include <linux/reboot.h> #include <linux/scatterlist.h> @@ -248,8 +249,10 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type, DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name, type_str, block); - if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS) + if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS) { DMERR("%s: reached maximum errors", v->data_dev->name); + dm_audit_log_target(DM_MSG_PREFIX, "max-corrupted-errors", v->ti, 0); + } snprintf(verity_env, DM_VERITY_ENV_LENGTH, "%s=%d,%llu", DM_VERITY_ENV_VAR_NAME, type, block); @@ -340,6 +343,11 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io, else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_METADATA, hash_block)) { + struct bio *bio = + dm_bio_from_per_bio_data(io, + v->ti->per_io_data_size); + dm_audit_log_bio(DM_MSG_PREFIX, "verify-metadata", bio, + block, 0); r = -EIO; goto release_ret_r; } @@ -523,7 +531,7 @@ static int verity_verify_io(struct dm_verity_io *io) sector_t cur_block = io->block + b; struct ahash_request *req = verity_io_hash_req(v, io); - if (v->validated_blocks && + if (v->validated_blocks && bio->bi_status == BLK_STS_OK && likely(test_bit(cur_block, v->validated_blocks))) { verity_bv_skip_block(v, io, iter); continue; @@ -590,8 +598,11 @@ static int verity_verify_io(struct dm_verity_io *io) return -EIO; } if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA, - cur_block)) + cur_block)) { + dm_audit_log_bio(DM_MSG_PREFIX, "verify-data", + bio, cur_block, 0); return -EIO; + } } } @@ -975,6 +986,8 @@ static void verity_dtr(struct dm_target *ti) static_branch_dec(&use_tasklet_enabled); kfree(v); + + dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1); } static int verity_alloc_most_once(struct dm_verity *v) @@ -1429,11 +1442,14 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv) verity_verify_sig_opts_cleanup(&verify_args); + dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1); + return 0; bad: verity_verify_sig_opts_cleanup(&verify_args); + dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0); verity_dtr(ti); return r; @@ -1498,25 +1514,7 @@ static struct target_type verity_target = { .iterate_devices = verity_iterate_devices, .io_hints = verity_io_hints, }; - -static int __init dm_verity_init(void) -{ - int r; - - r = dm_register_target(&verity_target); - if (r < 0) - DMERR("register failed %d", r); - - return r; -} - -static void __exit dm_verity_exit(void) -{ - dm_unregister_target(&verity_target); -} - -module_init(dm_verity_init); -module_exit(dm_verity_exit); +module_dm(verity); MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>"); 
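The dm-verity changes above do two things: emit audit events (dm_audit_log_bio(), dm_audit_log_ctr()/dm_audit_log_dtr()) on verification failure and when the corrupted-block limit is hit, and tighten check_at_most_once by adding bio->bi_status == BLK_STS_OK to the validated-blocks shortcut — a previously validated block may only skip re-hashing when the bio carrying it has no I/O error, otherwise FEC never gets the chance to repair it. A simplified stand-in for that gate (types assumed, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

struct io_ctx {
	int bi_status;            /* 0 plays the role of BLK_STS_OK */
	const uint8_t *validated; /* one bit per data block */
};

static bool block_validated(const struct io_ctx *io, uint64_t block)
{
	return io->validated[block / 8] & (1u << (block % 8));
}

/* A block may skip re-hashing only if it was validated before AND the
 * bio carries no error; on error, verify (and FEC-correct) regardless. */
static bool must_verify(const struct io_ctx *io, uint64_t block)
{
	if (io->bi_status != 0)
		return true;
	return !block_validated(io, block);
}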
MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>"); diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 3aa5874f0aef..074cb785eafc 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -2773,27 +2773,7 @@ static struct target_type writecache_target = { .iterate_devices = writecache_iterate_devices, .io_hints = writecache_io_hints, }; - -static int __init dm_writecache_init(void) -{ - int r; - - r = dm_register_target(&writecache_target); - if (r < 0) { - DMERR("register failed %d", r); - return r; - } - - return 0; -} - -static void __exit dm_writecache_exit(void) -{ - dm_unregister_target(&writecache_target); -} - -module_init(dm_writecache_init); -module_exit(dm_writecache_exit); +module_dm(writecache); MODULE_DESCRIPTION(DM_NAME " writecache target"); MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>"); diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c index 2601cd520384..3b13e6eb1aa4 100644 --- a/drivers/md/dm-zero.c +++ b/drivers/md/dm-zero.c @@ -27,6 +27,7 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv) * Silently drop discards, avoiding -EOPNOTSUPP. */ ti->num_discard_bios = 1; + ti->discards_supported = true; return 0; } @@ -45,6 +46,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio) zero_fill_bio(bio); break; case REQ_OP_WRITE: + case REQ_OP_DISCARD: /* writes get silently dropped */ break; default: @@ -57,32 +59,23 @@ static int zero_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_SUBMITTED; } +static void zero_io_hints(struct dm_target *ti, struct queue_limits *limits) +{ + limits->max_discard_sectors = UINT_MAX; + limits->max_hw_discard_sectors = UINT_MAX; + limits->discard_granularity = 512; +} + static struct target_type zero_target = { .name = "zero", - .version = {1, 1, 0}, + .version = {1, 2, 0}, .features = DM_TARGET_NOWAIT, .module = THIS_MODULE, .ctr = zero_ctr, .map = zero_map, + .io_hints = zero_io_hints, }; - -static int __init dm_zero_init(void) -{ - int r = dm_register_target(&zero_target); - - if (r < 0) - DMERR("register failed %d", r); - - return r; -} - -static void __exit dm_zero_exit(void) -{ - dm_unregister_target(&zero_target); -} - -module_init(dm_zero_init) -module_exit(dm_zero_exit) +module_dm(zero); MODULE_AUTHOR("Jana Saout <jana@saout.de>"); MODULE_DESCRIPTION(DM_NAME " dummy target returning zeros"); diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index cf9402064aba..8f0896a6990b 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -1701,7 +1701,7 @@ static int dmz_load_mapping(struct dmz_metadata *zmd) if (IS_ERR(dmap_mblk)) return PTR_ERR(dmap_mblk); zmd->map_mblk[i] = dmap_mblk; - dmap = (struct dmz_map *) dmap_mblk->data; + dmap = dmap_mblk->data; i++; e = 0; } @@ -1832,7 +1832,7 @@ static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, unsigned int dzone_id, unsigned int bzone_id) { struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT]; - struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data; + struct dmz_map *dmap = dmap_mblk->data; int map_idx = chunk & DMZ_MAP_ENTRIES_MASK; dmap[map_idx].dzone_id = cpu_to_le32(dzone_id); @@ -2045,7 +2045,7 @@ struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, enum req_op op) { struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT]; - struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data; + struct dmz_map *dmap = dmap_mblk->data; int 
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index cf9402064aba..8f0896a6990b 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1701,7 +1701,7 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
 		if (IS_ERR(dmap_mblk))
 			return PTR_ERR(dmap_mblk);
 		zmd->map_mblk[i] = dmap_mblk;
-		dmap = (struct dmz_map *) dmap_mblk->data;
+		dmap = dmap_mblk->data;
 		i++;
 		e = 0;
 	}
@@ -1832,7 +1832,7 @@ static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
 			       unsigned int dzone_id, unsigned int bzone_id)
 {
 	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
-	struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
+	struct dmz_map *dmap = dmap_mblk->data;
 	int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;
 
 	dmap[map_idx].dzone_id = cpu_to_le32(dzone_id);
@@ -2045,7 +2045,7 @@ struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
 				      unsigned int chunk, enum req_op op)
 {
 	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
-	struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
+	struct dmz_map *dmap = dmap_mblk->data;
 	int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
 	unsigned int dzone_id;
 	struct dm_zone *dzone = NULL;
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index ad4764dcd013..ad8e670a2f9b 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1138,7 +1138,7 @@ static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
 	return r;
 }
 
-static struct target_type dmz_type = {
+static struct target_type zoned_target = {
 	.name = "zoned",
 	.version = {2, 0, 0},
 	.features = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL,
@@ -1154,19 +1154,7 @@ static struct target_type dmz_type = {
 	.status = dmz_status,
 	.message = dmz_message,
 };
-
-static int __init dmz_init(void)
-{
-	return dm_register_target(&dmz_type);
-}
-
-static void __exit dmz_exit(void)
-{
-	dm_unregister_target(&dmz_type);
-}
-
-module_init(dmz_init);
-module_exit(dmz_exit);
+module_dm(zoned);
 
 MODULE_DESCRIPTION(DM_NAME " target for zoned block devices");
 MODULE_AUTHOR("Damien Le Moal <damien.lemoal@wdc.com>");
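The dmz_type -> zoned_target rename above exists to satisfy module_dm(zoned): the helper macro (added earlier in this series by "dm: add helper macro for simple DM target module init and exit") generates the registration boilerplate from the target name and expects a matching <name>_target variable, as all of the conversions in this merge show. Its expansion is roughly the following; treat this as a sketch, with include/linux/device-mapper.h as the authoritative definition:

    /* Approximate expansion of module_dm(name). */
    #define module_dm(name) \
    static int __init dm_##name##_init(void) \
    { \
        return dm_register_target(&(name##_target)); \
    } \
    module_init(dm_##name##_init) \
    static void __exit dm_##name##_exit(void) \
    { \
        dm_unregister_target(&(name##_target)); \
    } \
    module_exit(dm_##name##_exit)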
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index dfde0088147a..3b694ba3a106 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1072,6 +1072,15 @@ static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
 	__dm_io_dec_pending(io);
 }
 
+/*
+ * The queue_limits are only valid as long as you have a reference
+ * count on 'md'. But _not_ imposing verification to avoid atomic_read(),
+ */
+static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
+{
+	return &md->queue->limits;
+}
+
 void disable_discard(struct mapped_device *md)
 {
 	struct queue_limits *limits = dm_get_queue_limits(md);
@@ -1162,7 +1171,8 @@ static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
 	return ti->len - target_offset;
 }
 
-static sector_t max_io_len(struct dm_target *ti, sector_t sector)
+static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
+			     unsigned int max_granularity)
 {
 	sector_t target_offset = dm_target_offset(ti, sector);
 	sector_t len = max_io_len_target_boundary(ti, target_offset);
@@ -1173,11 +1183,16 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
 	 * explains why stacked chunk_sectors based splitting via
 	 * bio_split_to_limits() isn't possible here.
 	 */
-	if (!ti->max_io_len)
+	if (!max_granularity)
 		return len;
 	return min_t(sector_t, len,
 		     min(queue_max_sectors(ti->table->md->queue),
-			 blk_chunk_sectors_left(target_offset, ti->max_io_len)));
+			 blk_chunk_sectors_left(target_offset, max_granularity)));
+}
+
+static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
+{
+	return __max_io_len(ti, sector, ti->max_io_len);
 }
 
 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
@@ -1565,12 +1580,13 @@ static void __send_empty_flush(struct clone_info *ci)
 }
 
 static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
-					unsigned int num_bios)
+					unsigned int num_bios,
+					unsigned int max_granularity)
 {
 	unsigned int len, bios;
 
 	len = min_t(sector_t, ci->sector_count,
-		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
+		    __max_io_len(ti, ci->sector, max_granularity));
 
 	atomic_add(num_bios, &ci->io->io_count);
 	bios = __send_duplicate_bios(ci, ti, num_bios, &len);
@@ -1606,16 +1622,24 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
 					  struct dm_target *ti)
 {
 	unsigned int num_bios = 0;
+	unsigned int max_granularity = 0;
+	struct queue_limits *limits = dm_get_queue_limits(ti->table->md);
 
 	switch (bio_op(ci->bio)) {
 	case REQ_OP_DISCARD:
 		num_bios = ti->num_discard_bios;
+		if (ti->max_discard_granularity)
+			max_granularity = limits->max_discard_sectors;
 		break;
 	case REQ_OP_SECURE_ERASE:
 		num_bios = ti->num_secure_erase_bios;
+		if (ti->max_secure_erase_granularity)
+			max_granularity = limits->max_secure_erase_sectors;
 		break;
 	case REQ_OP_WRITE_ZEROES:
 		num_bios = ti->num_write_zeroes_bios;
+		if (ti->max_write_zeroes_granularity)
+			max_granularity = limits->max_write_zeroes_sectors;
 		break;
 	default:
 		break;
@@ -1630,7 +1654,7 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
 	if (unlikely(!num_bios))
 		return BLK_STS_NOTSUPP;
 
-	__send_changing_extent_only(ci, ti, num_bios);
+	__send_changing_extent_only(ci, ti, num_bios, max_granularity);
 
 	return BLK_STS_OK;
 }
@@ -2297,17 +2321,6 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
 }
 
 /*
- * The queue_limits are only valid as long as you have a reference
- * count on 'md'.
- */
-struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
-{
-	BUG_ON(!atomic_read(&md->holders));
-	return &md->queue->limits;
-}
-EXPORT_SYMBOL_GPL(dm_get_queue_limits);
-
-/*
  * Setup the DM device's queue based on md's type
  */
 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 22eaed188907..a856e0aee73b 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -20,6 +20,7 @@
 #include <linux/completion.h>
 #include <linux/kobject.h>
 #include <linux/refcount.h>
+#include <linux/log2.h>
 
 #include "dm-stats.h"
 
@@ -228,4 +229,25 @@ void dm_free_md_mempools(struct dm_md_mempools *pools);
  */
 unsigned int dm_get_reserved_bio_based_ios(void);
 
+#define DM_HASH_LOCKS_MAX 64
+
+static inline unsigned int dm_num_hash_locks(void)
+{
+	unsigned int num_locks = roundup_pow_of_two(num_online_cpus()) << 1;
+
+	return min_t(unsigned int, num_locks, DM_HASH_LOCKS_MAX);
+}
+
+#define DM_HASH_LOCKS_MULT  4294967291ULL
+#define DM_HASH_LOCKS_SHIFT 6
+
+static inline unsigned int dm_hash_locks_index(sector_t block,
+					       unsigned int num_locks)
+{
+	sector_t h1 = (block * DM_HASH_LOCKS_MULT) >> DM_HASH_LOCKS_SHIFT;
+	sector_t h2 = h1 >> DM_HASH_LOCKS_SHIFT;
+
+	return (h1 ^ h2) & (num_locks - 1);
+}
+
 #endif
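In __max_io_len() above, the split point for discards (and, for targets that opt in, secure erase and write zeroes) comes from blk_chunk_sectors_left(), i.e. the number of sectors remaining before the next max_granularity boundary, so no single bio ever spans two granularity windows. A userspace model of the power-of-two case; the offset and granularity values are illustrative, not derived from a real pool:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the power-of-two branch of blk_chunk_sectors_left():
     * sectors remaining before the next chunk/granularity boundary. */
    static uint32_t chunk_sectors_left(uint64_t offset, uint32_t chunk_sectors)
    {
        return chunk_sectors - (uint32_t)(offset & (chunk_sectors - 1));
    }

    int main(void)
    {
        uint32_t granularity = 1024 * 1024; /* illustrative, in sectors */

        /* At a boundary, a full granularity window is available ... */
        printf("%u\n", chunk_sectors_left(0, granularity));
        /* ... 100 sectors in, only granularity - 100 remain. */
        printf("%u\n", chunk_sectors_left(100, granularity));
        return 0;
    }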
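The new dm.h helpers close out the lock-scaling theme of this merge: dm_num_hash_locks() sizes the lock array to twice the next power of two at or above the online CPU count, capped at DM_HASH_LOCKS_MAX, and dm_hash_locks_index() mixes the block number through a multiplicative hash before masking. Because the count is always a power of two, the final mask is exact. The arithmetic can be exercised standalone (the 8-CPU figure is just an example):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DM_HASH_LOCKS_MAX   64
    #define DM_HASH_LOCKS_MULT  4294967291ULL /* large prime near 2^32 */
    #define DM_HASH_LOCKS_SHIFT 6

    /* Mirror of dm_hash_locks_index() from drivers/md/dm.h. */
    static unsigned int dm_hash_locks_index(uint64_t block,
                                            unsigned int num_locks)
    {
        uint64_t h1 = (block * DM_HASH_LOCKS_MULT) >> DM_HASH_LOCKS_SHIFT;
        uint64_t h2 = h1 >> DM_HASH_LOCKS_SHIFT;

        return (h1 ^ h2) & (num_locks - 1);
    }

    int main(void)
    {
        /* What dm_num_hash_locks() would pick on an 8-CPU machine:
         * roundup_pow_of_two(8) << 1 == 16, under the cap of 64. */
        unsigned int num_locks = 16;
        uint64_t block;

        for (block = 0; block < 8; block++)
            printf("block %" PRIu64 " -> lock %u\n",
                   block, dm_hash_locks_index(block, num_locks));
        return 0;
    }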