Diffstat (limited to 'fs/xfs/xfs_bmap_util.c')
-rw-r--r--	fs/xfs/xfs_bmap_util.c	80
1 file changed, 53 insertions, 27 deletions
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 4719ec90029c..06ca11731e43 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -29,6 +29,8 @@
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_rtbitmap.h"
+#include "xfs_rtgroup.h"
+#include "xfs_zone_alloc.h"
/* Kernel only BMAP related definitions and functions */
@@ -41,16 +43,12 @@ xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
if (XFS_IS_REALTIME_INODE(ip))
- return XFS_FSB_TO_BB(ip->i_mount, fsb);
+ return xfs_rtb_to_daddr(ip->i_mount, fsb);
return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}
/*
* Routine to zero an extent on disk allocated to the specific inode.
- *
- * The VFS functions take a linearised filesystem block offset, so we have to
- * convert the sparse xfs fsb to the right format first.
- * VFS types are real funky, too.
*/
int
xfs_zero_extent(
@@ -58,15 +56,10 @@ xfs_zero_extent(
xfs_fsblock_t start_fsb,
xfs_off_t count_fsb)
{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_buftarg *target = xfs_inode_buftarg(ip);
- xfs_daddr_t sector = xfs_fsb_to_db(ip, start_fsb);
- sector_t block = XFS_BB_TO_FSBT(mp, sector);
-
- return blkdev_issue_zeroout(target->bt_bdev,
- block << (mp->m_super->s_blocksize_bits - 9),
- count_fsb << (mp->m_super->s_blocksize_bits - 9),
- GFP_KERNEL, 0);
+ return blkdev_issue_zeroout(xfs_inode_buftarg(ip)->bt_bdev,
+ xfs_fsb_to_db(ip, start_fsb),
+ XFS_FSB_TO_BB(ip->i_mount, count_fsb),
+ GFP_KERNEL, 0);
}
/*
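[Note: the simplification in xfs_zero_extent() above works because blkdev_issue_zeroout() takes 512-byte sectors, xfs_fsb_to_db() already returns a 512-byte disk address, and XFS_FSB_TO_BB() is the same shift by (block size log2 - 9) that the removed open-coded conversion performed. A minimal user-space sketch of that arithmetic, with a 4096-byte filesystem block size assumed purely for illustration:]

#include <stdio.h>

#define BBSHIFT 9				/* 512-byte "basic blocks" */

int main(void)
{
	unsigned int blocklog = 12;		/* assumed: 4k filesystem blocks */
	unsigned long long count_fsb = 16;	/* extent length in fs blocks */

	/* Equivalent of XFS_FSB_TO_BB(mp, count_fsb) */
	unsigned long long count_bb = count_fsb << (blocklog - BBSHIFT);

	printf("%llu fs blocks -> %llu 512-byte sectors\n", count_fsb, count_bb);
	return 0;
}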
@@ -111,7 +104,7 @@ xfs_bmap_count_blocks(
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_btree_cur *cur;
- xfs_extlen_t btblocks = 0;
+ xfs_filblks_t btblocks = 0;
int error;
*nextents = 0;
@@ -444,7 +437,8 @@ xfs_bmap_punch_delalloc_range(
struct xfs_inode *ip,
int whichfork,
xfs_off_t start_byte,
- xfs_off_t end_byte)
+ xfs_off_t end_byte,
+ struct xfs_zone_alloc_ctx *ac)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
@@ -475,7 +469,21 @@ xfs_bmap_punch_delalloc_range(
continue;
}
- xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got, &del);
+ if (xfs_is_zoned_inode(ip) && ac) {
+ /*
+ * In a zoned buffered write context we need to return
+ * the punched delalloc allocations to the allocation
+ * context. This allows reusing them in the following
+ * iomap iterations.
+ */
+ xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got,
+ &del, XFS_BMAPI_REMAP);
+ ac->reserved_blocks += del.br_blockcount;
+ } else {
+ xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got,
+ &del, 0);
+ }
+
if (!xfs_iext_get_extent(ifp, &icur, &got))
break;
}
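[Note: a minimal user-space sketch of the bookkeeping the new comment describes, using hypothetical stand-in types for struct xfs_zone_alloc_ctx and the delalloc extent records. On zoned inodes the block count of each punched delalloc extent is credited back to the caller's allocation context so the following iomap iterations can reuse the reservation:]

#include <stdbool.h>
#include <stdio.h>

struct zone_alloc_ctx { unsigned long long reserved_blocks; };
struct dext { unsigned long long blockcount; };

/* Punch a run of delalloc extents; on zoned inodes hand the blocks back. */
static void punch_delalloc(struct zone_alloc_ctx *ac, bool zoned,
			   const struct dext *del, int nr)
{
	for (int i = 0; i < nr; i++) {
		if (zoned && ac)
			ac->reserved_blocks += del[i].blockcount;
		/* the extent removal itself is elided */
	}
}

int main(void)
{
	struct zone_alloc_ctx ac = { 0 };
	struct dext extents[] = { { 8 }, { 4 }, { 16 } };

	punch_delalloc(&ac, true, extents, 3);
	/* A later iomap iteration can draw on these blocks again. */
	printf("blocks returned to the allocation context: %llu\n",
	       ac.reserved_blocks);
	return 0;
}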
@@ -540,16 +548,20 @@ xfs_can_free_eofblocks(
*/
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
if (xfs_inode_has_bigrtalloc(ip))
- end_fsb = xfs_rtb_roundup_rtx(mp, end_fsb);
+ end_fsb = xfs_fileoff_roundup_rtx(mp, end_fsb);
last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
if (last_fsb <= end_fsb)
return false;
/*
- * Check if there is an post-EOF extent to free.
+ * Check if there is a post-EOF extent to free. If there are any
+ * delalloc blocks attached to the inode (data fork delalloc
+ * reservations or CoW extents of any kind), we need to free them so
+ * that inactivation doesn't fail to erase them.
*/
xfs_ilock(ip, XFS_ILOCK_SHARED);
- if (xfs_iext_lookup_extent(ip, &ip->i_df, end_fsb, &icur, &imap))
+ if (ip->i_delayed_blks ||
+ xfs_iext_lookup_extent(ip, &ip->i_df, end_fsb, &icur, &imap))
found_blocks = true;
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return found_blocks;
@@ -586,7 +598,7 @@ xfs_free_eofblocks(
if (ip->i_delayed_blks) {
xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK,
round_up(XFS_ISIZE(ip), mp->m_sb.sb_blocksize),
- LLONG_MAX);
+ LLONG_MAX, NULL);
}
xfs_inode_clear_eofblocks_tag(ip);
return 0;
@@ -829,7 +841,8 @@ int
xfs_free_file_space(
struct xfs_inode *ip,
xfs_off_t offset,
- xfs_off_t len)
+ xfs_off_t len,
+ struct xfs_zone_alloc_ctx *ac)
{
struct xfs_mount *mp = ip->i_mount;
xfs_fileoff_t startoffset_fsb;
@@ -858,8 +871,8 @@ xfs_free_file_space(
/* We can only free complete realtime extents. */
if (xfs_inode_has_bigrtalloc(ip)) {
- startoffset_fsb = xfs_rtb_roundup_rtx(mp, startoffset_fsb);
- endoffset_fsb = xfs_rtb_rounddown_rtx(mp, endoffset_fsb);
+ startoffset_fsb = xfs_fileoff_roundup_rtx(mp, startoffset_fsb);
+ endoffset_fsb = xfs_fileoff_rounddown_rtx(mp, endoffset_fsb);
}
/*
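[Note: a minimal user-space sketch of the rounding performed in the bigrtalloc case above, assuming a realtime extent size of four filesystem blocks. The start offset is rounded up and the end offset down so that only whole realtime extents fall inside the punched range; the helper names are illustrative stand-ins for xfs_fileoff_roundup_rtx() and xfs_fileoff_rounddown_rtx():]

#include <stdio.h>

static unsigned long long roundup_rtx(unsigned long long fsb, unsigned int rextsize)
{
	return ((fsb + rextsize - 1) / rextsize) * rextsize;
}

static unsigned long long rounddown_rtx(unsigned long long fsb, unsigned int rextsize)
{
	return (fsb / rextsize) * rextsize;
}

int main(void)
{
	unsigned int rextsize = 4;	/* assumed rt extent size in blocks */

	/* Shrink the range inward so only complete rt extents are freed. */
	printf("start 5 -> %llu, end 14 -> %llu\n",
	       roundup_rtx(5, rextsize), rounddown_rtx(14, rextsize));
	return 0;
}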
@@ -884,7 +897,7 @@ xfs_free_file_space(
return 0;
if (offset + len > XFS_ISIZE(ip))
len = XFS_ISIZE(ip) - offset;
- error = xfs_zero_range(ip, offset, len, NULL);
+ error = xfs_zero_range(ip, offset, len, ac, NULL);
if (error)
return error;
@@ -972,7 +985,8 @@ int
xfs_collapse_file_space(
struct xfs_inode *ip,
xfs_off_t offset,
- xfs_off_t len)
+ xfs_off_t len,
+ struct xfs_zone_alloc_ctx *ac)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
@@ -985,7 +999,7 @@ xfs_collapse_file_space(
trace_xfs_collapse_file_space(ip);
- error = xfs_free_file_space(ip, offset, len);
+ error = xfs_free_file_space(ip, offset, len, ac);
if (error)
return error;
@@ -1527,6 +1541,18 @@ xfs_swap_extents(
goto out_unlock;
}
+ /*
+ * The rmapbt implementation is unable to resume a swapext operation
+ * after a crash if the allocation unit size is larger than a block.
+ * This (deprecated) interface will not be upgraded to handle this
+ * situation. Defragmentation must be performed with the commit range
+ * ioctl.
+ */
+ if (XFS_IS_REALTIME_INODE(ip) && xfs_has_rtgroups(ip->i_mount)) {
+ error = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+
error = xfs_qm_dqattach(ip);
if (error)
goto out_unlock;