author     Jan Kara <jack@suse.cz>	2026-03-26 12:54:25 +0300
committer  Christian Brauner <brauner@kernel.org>	2026-03-26 17:03:31 +0300
commit     c86f5d25514c2a60fcf5ea0aa11c5d8bd1a313ef (patch)
tree       ca961a0d75763eee6b8545b6c21ff4d327ca7240
parent     521bea7cec8a79684402d555caab408ed43171d5 (diff)
fs: Make bhs point to mapping_metadata_bhs
Make buffer heads point to mapping_metadata_bhs instead of struct
address_space. This makes the code more self-contained. For the (only)
case of IO error handling, where we really need to reach struct
address_space, add a pointer to the mapping from mapping_metadata_bhs.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://patch.msgid.link/20260326095354.16340-73-jack@suse.cz
Signed-off-by: Christian Brauner <brauner@kernel.org>
 fs/buffer.c                 | 34 ++++++++++++++++------------------
 fs/inode.c                  |  1 +
 include/linux/buffer_head.h |  4 ++--
 include/linux/fs.h          |  1 +
 4 files changed, 20 insertions(+), 20 deletions(-)
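For orientation, a minimal sketch of the pointer relationships this
patch establishes (struct fields as in the diff below; the helper
function is hypothetical and only restates what
mark_buffer_write_io_error() already does):

#include <linux/buffer_head.h>
#include <linux/pagemap.h>

/*
 * Hypothetical helper, not part of the patch: after this change a
 * buffer head points at the mapping's mapping_metadata_bhs through
 * bh->b_mmb, and the IO error path is the only place that still needs
 * the address_space, reached via the new mmb->mapping back pointer.
 */
static void report_metadata_io_error(struct buffer_head *bh)
{
	struct mapping_metadata_bhs *mmb = READ_ONCE(bh->b_mmb);

	if (mmb)
		mapping_set_error(mmb->mapping, -EIO);
}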
diff --git a/fs/buffer.c b/fs/buffer.c
index 294f9cd07f42..67b3d4624503 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -497,13 +497,12 @@ static void __remove_assoc_queue(struct mapping_metadata_bhs *mmb,
{
lockdep_assert_held(&mmb->lock);
list_del_init(&bh->b_assoc_buffers);
- WARN_ON(!bh->b_assoc_map);
- bh->b_assoc_map = NULL;
+ WARN_ON(!bh->b_mmb);
+ bh->b_mmb = NULL;
}
static void remove_assoc_queue(struct buffer_head *bh)
{
- struct address_space *mapping;
struct mapping_metadata_bhs *mmb;
/*
@@ -514,13 +513,12 @@ static void remove_assoc_queue(struct buffer_head *bh)
* opportunistically acquire the lock and then recheck the bh
* didn't move under us.
*/
- while (bh->b_assoc_map) {
+ while (bh->b_mmb) {
rcu_read_lock();
- mapping = READ_ONCE(bh->b_assoc_map);
- if (mapping) {
- mmb = &mapping->i_metadata_bhs;
+ mmb = READ_ONCE(bh->b_mmb);
+ if (mmb) {
spin_lock(&mmb->lock);
- if (bh->b_assoc_map == mapping)
+ if (bh->b_mmb == mmb)
__remove_assoc_queue(mmb, bh);
spin_unlock(&mmb->lock);
}
@@ -551,9 +549,9 @@ EXPORT_SYMBOL_GPL(inode_has_buffers);
* Do this in two main stages: first we copy dirty buffers to a
* temporary inode list, queueing the writes as we go. Then we clean
* up, waiting for those writes to complete. mark_buffer_dirty_inode()
- * doesn't touch b_assoc_buffers list if b_assoc_map is not NULL so we
- * are sure the buffer stays on our list until IO completes (at which point
- * it can be reaped).
+ * doesn't touch b_assoc_buffers list if b_mmb is not NULL so we are sure the
+ * buffer stays on our list until IO completes (at which point it can be
+ * reaped).
*/
int sync_mapping_buffers(struct address_space *mapping)
{
@@ -571,14 +569,14 @@ int sync_mapping_buffers(struct address_space *mapping)
spin_lock(&mmb->lock);
while (!list_empty(&mmb->list)) {
bh = BH_ENTRY(mmb->list.next);
- WARN_ON_ONCE(bh->b_assoc_map != mapping);
+ WARN_ON_ONCE(bh->b_mmb != mmb);
__remove_assoc_queue(mmb, bh);
/* Avoid race with mark_buffer_dirty_inode() which does
* a lockless check and we rely on seeing the dirty bit */
smp_mb();
if (buffer_dirty(bh) || buffer_locked(bh)) {
list_add(&bh->b_assoc_buffers, &tmp);
- bh->b_assoc_map = mapping;
+ bh->b_mmb = mmb;
if (buffer_dirty(bh)) {
get_bh(bh);
spin_unlock(&mmb->lock);
@@ -616,7 +614,7 @@ int sync_mapping_buffers(struct address_space *mapping)
smp_mb();
if (buffer_dirty(bh)) {
list_add(&bh->b_assoc_buffers, &mmb->list);
- bh->b_assoc_map = mapping;
+ bh->b_mmb = mmb;
}
spin_unlock(&mmb->lock);
wait_on_buffer(bh);
@@ -724,11 +722,11 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
struct address_space *mapping = inode->i_mapping;
mark_buffer_dirty(bh);
- if (!bh->b_assoc_map) {
+ if (!bh->b_mmb) {
spin_lock(&mapping->i_metadata_bhs.lock);
list_move_tail(&bh->b_assoc_buffers,
&mapping->i_metadata_bhs.list);
- bh->b_assoc_map = mapping;
+ bh->b_mmb = &mapping->i_metadata_bhs;
spin_unlock(&mapping->i_metadata_bhs.lock);
}
}
@@ -1124,8 +1122,8 @@ void mark_buffer_write_io_error(struct buffer_head *bh)
/* FIXME: do we need to set this in both places? */
if (bh->b_folio && bh->b_folio->mapping)
mapping_set_error(bh->b_folio->mapping, -EIO);
- if (bh->b_assoc_map)
- mapping_set_error(bh->b_assoc_map, -EIO);
+ if (bh->b_mmb)
+ mapping_set_error(bh->b_mmb->mapping, -EIO);
}
EXPORT_SYMBOL(mark_buffer_write_io_error);
diff --git a/fs/inode.c b/fs/inode.c
index 393f586d050a..3874b933abdb 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -276,6 +276,7 @@ int inode_init_always_gfp(struct super_block *sb, struct inode *inode, gfp_t gfp
mapping->a_ops = &empty_aops;
mapping->host = inode;
+ mapping->i_metadata_bhs.mapping = mapping;
mapping->flags = 0;
mapping->wb_err = 0;
atomic_set(&mapping->i_mmap_writable, 0);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 631bf971efc0..20636599d858 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -73,8 +73,8 @@ struct buffer_head {
bh_end_io_t *b_end_io; /* I/O completion */
void *b_private; /* reserved for b_end_io */
struct list_head b_assoc_buffers; /* associated with another mapping */
- struct address_space *b_assoc_map; /* mapping this buffer is
- associated with */
+ struct mapping_metadata_bhs *b_mmb; /* head of the list of metadata bhs
+ * this buffer is associated with */
atomic_t b_count; /* users using this buffer_head */
spinlock_t b_uptodate_lock; /* Used by the first bh in a page, to
* serialise IO completion of other
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 76360b0040e0..fa2a812bd718 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -447,6 +447,7 @@ extern const struct address_space_operations empty_aops;
/* Structure for tracking metadata buffer heads associated with the mapping */
struct mapping_metadata_bhs {
+ struct address_space *mapping; /* Mapping bhs are associated with */
spinlock_t lock; /* Lock protecting bh list */
struct list_head list; /* The list of bhs (b_assoc_buffers) */
};