Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c | 50
1 file changed, 23 insertions, 27 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 838a9cf246bd..9f6d2e41281d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1036,6 +1036,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
 static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
+	int ret;
+	struct buffer_head *bh;
+
 	/* Size must be multiple of hard sectorsize */
 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
 			(size < 512 || size > PAGE_SIZE))) {
@@ -1048,20 +1051,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 		return NULL;
 	}
 
-	for (;;) {
-		struct buffer_head * bh;
-		int ret;
+retry:
+	bh = __find_get_block(bdev, block, size);
+	if (bh)
+		return bh;
 
+	ret = grow_buffers(bdev, block, size);
+	if (ret == 0) {
+		free_more_memory();
+		goto retry;
+	} else if (ret > 0) {
 		bh = __find_get_block(bdev, block, size);
 		if (bh)
 			return bh;
-
-		ret = grow_buffers(bdev, block, size);
-		if (ret < 0)
-			return NULL;
-		if (ret == 0)
-			free_more_memory();
 	}
+	return NULL;
 }
 
 /*
@@ -2302,8 +2306,8 @@ EXPORT_SYMBOL(block_commit_write);
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
  *
- * Direct callers of this function should call vfs_check_frozen() so that page
- * fault does not busyloop until the fs is thawed.
+ * Direct callers of this function should protect against filesystem freezing
+ * using sb_start_write() - sb_end_write() functions.
  */
 int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 			 get_block_t get_block)
@@ -2314,6 +2318,12 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	loff_t size;
 	int ret;
 
+	/*
+	 * Update file times before taking page lock. We may end up failing the
+	 * fault so this update may be superfluous but who really cares...
+	 */
+	file_update_time(vma->vm_file);
+
 	lock_page(page);
 	size = i_size_read(inode);
 	if ((page->mapping != inode->i_mapping) ||
@@ -2335,18 +2345,7 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	if (unlikely(ret < 0))
 		goto out_unlock;
 
-	/*
-	 * Freezing in progress? We check after the page is marked dirty and
-	 * with page lock held so if the test here fails, we are sure freezing
-	 * code will wait during syncing until the page fault is done - at that
-	 * point page will be dirty and unlocked so freezing code will write it
-	 * and writeprotect it again.
-	 */
 	set_page_dirty(page);
-	if (inode->i_sb->s_frozen != SB_UNFROZEN) {
-		ret = -EAGAIN;
-		goto out_unlock;
-	}
 	wait_on_page_writeback(page);
 	return 0;
 out_unlock:
@@ -2361,12 +2360,9 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	int ret;
 	struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
 
-	/*
-	 * This check is racy but catches the common case. The check in
-	 * __block_page_mkwrite() is reliable.
-	 */
-	vfs_check_frozen(sb, SB_FREEZE_WRITE);
+	sb_start_pagefault(sb);
 	ret = __block_page_mkwrite(vma, vmf, get_block);
+	sb_end_pagefault(sb);
 	return block_page_mkwrite_return(ret);
 }
 EXPORT_SYMBOL(block_page_mkwrite);
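Note: the updated comment above __block_page_mkwrite() asks direct callers to take freeze protection themselves, which is exactly what the reworked block_page_mkwrite() wrapper now does with sb_start_pagefault()/sb_end_pagefault(). The code below is a minimal sketch of such a direct caller, not part of the patch; it assumes a hypothetical filesystem "myfs" with its own get_block_t implementation (myfs_get_block) and simply mirrors the wrapper in the last hunk.

/*
 * Illustrative sketch only (not from this patch): a hypothetical
 * filesystem "myfs" calling __block_page_mkwrite() directly must hold
 * freeze protection around the call.  myfs_get_block is an assumed
 * fs-specific get_block_t implementation.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>

static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create);

static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
	int ret;

	sb_start_pagefault(sb);		/* blocks while the fs is being frozen */
	ret = __block_page_mkwrite(vma, vmf, myfs_get_block);
	sb_end_pagefault(sb);		/* pairs with sb_start_pagefault() */
	return block_page_mkwrite_return(ret);
}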
