summary refs log tree commit diff
path: root/fs/buffer.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c  13
1 files changed, 10 insertions, 3 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 4d7f84e77d2f..b0b3792b1496 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -719,8 +719,15 @@ void mmb_mark_buffer_dirty(struct buffer_head *bh,
mark_buffer_dirty(bh);
if (!bh->b_mmb) {
spin_lock(&mmb->lock);
+ /*
+ * For a corrupted filesystem with multiply claimed blocks this
+ * can fail. Avoid corrupting the linked list in that case.
+ */
+ if (cmpxchg(&bh->b_mmb, NULL, mmb) != NULL) {
+ spin_unlock(&mmb->lock);
+ return;
+ }
list_move_tail(&bh->b_assoc_buffers, &mmb->list);
- bh->b_mmb = mmb;
spin_unlock(&mmb->lock);
}
}
@@ -822,8 +829,7 @@ struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
long offset;
struct mem_cgroup *memcg, *old_memcg;
- /* The folio lock pins the memcg */
- memcg = folio_memcg(folio);
+ memcg = get_mem_cgroup_from_folio(folio);
old_memcg = set_active_memcg(memcg);
head = NULL;
@@ -844,6 +850,7 @@ struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
}
out:
set_active_memcg(old_memcg);
+ mem_cgroup_put(memcg);
return head;
/*
* In case anything failed, we just free everything we got.