Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	60
1 file changed, 17 insertions(+), 43 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index bd091329026c..2379564e5aea 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -49,6 +49,7 @@
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
+#include <linux/sched/isolation.h>
#include "internal.h"
@@ -562,12 +563,6 @@ repeat:
return err;
}
-void emergency_thaw_bdev(struct super_block *sb)
-{
- while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
- printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
-}
-
/**
* sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
* @mapping: the mapping which wants those buffers written
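Note: emergency_thaw_bdev() is dropped from fs/buffer.c; its only job was the retry loop below. A minimal sketch of the equivalent open-coded loop at the call site, assuming the caller already holds a valid super_block:

	/* sketch: keep thawing until thaw_bdev() fails, i.e. the bdev is no
	 * longer frozen (freeze counts nest, so one call may not be enough) */
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
		printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);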
@@ -1225,19 +1220,14 @@ EXPORT_SYMBOL(mark_buffer_dirty);
void mark_buffer_write_io_error(struct buffer_head *bh)
{
- struct super_block *sb;
-
set_buffer_write_io_error(bh);
/* FIXME: do we need to set this in both places? */
if (bh->b_folio && bh->b_folio->mapping)
mapping_set_error(bh->b_folio->mapping, -EIO);
- if (bh->b_assoc_map)
+ if (bh->b_assoc_map) {
mapping_set_error(bh->b_assoc_map, -EIO);
- rcu_read_lock();
- sb = READ_ONCE(bh->b_bdev->bd_super);
- if (sb)
- errseq_set(&sb->s_wb_err, -EIO);
- rcu_read_unlock();
+ errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
+ }
}
EXPORT_SYMBOL(mark_buffer_write_io_error);
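Note: the superblock is now reached through the associated mapping's host inode rather than the removed bh->b_bdev->bd_super back-pointer. A minimal sketch of how the error recorded with errseq_set() is later consumed, using the standard errseq API (the wrapper name here is hypothetical):

	/* sketch: a later syncfs()-style caller samples s_wb_err against the
	 * cursor it saw last time and advances that cursor */
	static int check_sb_wb_error(struct super_block *sb, errseq_t *since)
	{
		return errseq_check_and_advance(&sb->s_wb_err, since);
	}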
@@ -1352,7 +1342,7 @@ static void bh_lru_install(struct buffer_head *bh)
* failing page migration.
* Skip putting upcoming bh into bh_lru until migration is done.
*/
- if (lru_cache_disabled()) {
+ if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
bh_lru_unlock();
return;
}
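Note: cpu_is_isolated() comes from the newly included <linux/sched/isolation.h>. Roughly, it tests whether the CPU is excluded from housekeeping work (nohz_full/isolcpus); a paraphrase of the helper, not copied from this diff:

	/* sketch: a CPU counts as isolated if it is not a housekeeping CPU
	 * for scheduler-domain or tick work */
	static inline bool cpu_is_isolated(int cpu)
	{
		return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) ||
		       !housekeeping_test_cpu(cpu, HK_TYPE_TICK);
	}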
@@ -1382,6 +1372,10 @@ lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
check_irqs_on();
bh_lru_lock();
+ if (cpu_is_isolated(smp_processor_id())) {
+ bh_lru_unlock();
+ return NULL;
+ }
for (i = 0; i < BH_LRU_SIZE; i++) {
struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
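Note: with this early return an isolated CPU never populates its per-CPU bh LRU, so it never needs to be interrupted later to flush that cache; lookups simply take the slow path. Simplified sketch of the fallback in the caller, modelled on __find_get_block (details omitted):

	/* sketch: LRU miss falls back to the page-cache lookup; the install
	 * step is the one bh_lru_install() now skips on isolated CPUs */
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (!bh) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}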
@@ -1539,21 +1533,6 @@ void invalidate_bh_lrus_cpu(void)
bh_lru_unlock();
}
-void set_bh_page(struct buffer_head *bh,
- struct page *page, unsigned long offset)
-{
- bh->b_page = page;
- BUG_ON(offset >= PAGE_SIZE);
- if (PageHighMem(page))
- /*
- * This catches illegal uses and preserves the offset:
- */
- bh->b_data = (char *)(0 + offset);
- else
- bh->b_data = page_address(page) + offset;
-}
-EXPORT_SYMBOL(set_bh_page);
-
void folio_set_bh(struct buffer_head *bh, struct folio *folio,
unsigned long offset)
{
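Note: set_bh_page() is removed in favour of the folio-based helper above. A remaining page-based caller would convert roughly like this (the caller shown is hypothetical):

	/* before: attach bh to a page at the given offset */
	set_bh_page(bh, page, offset);

	/* after: attach via the page's folio */
	folio_set_bh(bh, page_folio(page), offset);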
@@ -2180,8 +2159,7 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
}
EXPORT_SYMBOL(__block_write_begin);
-static int __block_commit_write(struct inode *inode, struct folio *folio,
- size_t from, size_t to)
+static void __block_commit_write(struct folio *folio, size_t from, size_t to)
{
size_t block_start, block_end;
bool partial = false;
@@ -2216,7 +2194,6 @@ static int __block_commit_write(struct inode *inode, struct folio *folio,
*/
if (!partial)
folio_mark_uptodate(folio);
- return 0;
}
/*
@@ -2253,7 +2230,6 @@ int block_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata)
{
struct folio *folio = page_folio(page);
- struct inode *inode = mapping->host;
size_t start = pos - folio_pos(folio);
if (unlikely(copied < len)) {
@@ -2277,7 +2253,7 @@ int block_write_end(struct file *file, struct address_space *mapping,
flush_dcache_folio(folio);
/* This could be a short (even 0-length) commit */
- __block_commit_write(inode, folio, start, start + copied);
+ __block_commit_write(folio, start, start + copied);
return copied;
}
@@ -2598,12 +2574,10 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
}
EXPORT_SYMBOL(cont_write_begin);
-int block_commit_write(struct page *page, unsigned from, unsigned to)
+void block_commit_write(struct page *page, unsigned from, unsigned to)
{
struct folio *folio = page_folio(page);
- struct inode *inode = folio->mapping->host;
- __block_commit_write(inode, folio, from, to);
- return 0;
+ __block_commit_write(folio, from, to);
}
EXPORT_SYMBOL(block_commit_write);
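Note: block_commit_write() now returns void because __block_commit_write() cannot fail. A caller that propagated its return value would be adjusted roughly as follows (surrounding code is hypothetical):

	/* before: the return value was always 0, so the check was dead code */
	ret = block_commit_write(page, from, to);
	if (ret)
		goto out;

	/* after: only the preceding write_begin step can fail */
	block_commit_write(page, from, to);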
@@ -2649,11 +2623,11 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
end = size - folio_pos(folio);
ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
- if (!ret)
- ret = __block_commit_write(inode, folio, 0, end);
-
- if (unlikely(ret < 0))
+ if (unlikely(ret))
goto out_unlock;
+
+ __block_commit_write(folio, 0, end);
+
folio_mark_dirty(folio);
folio_wait_stable(folio);
return 0;
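Note: after this change only __block_write_begin_int() can fail inside block_page_mkwrite(). A typical filesystem wrapper is unaffected; a minimal sketch, with myfs_get_block as a hypothetical get_block_t:

	/* sketch: ->page_mkwrite implementation; 0 means the folio is locked
	 * and dirtied, anything else is an errno to map to a vm_fault_t */
	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
	{
		int err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);

		if (err)
			return vmf_error(err);
		return VM_FAULT_LOCKED;
	}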