Diffstat (limited to 'fs/xfs/libxfs/xfs_bmap_btree.c')
-rw-r--r--	fs/xfs/libxfs/xfs_bmap_btree.c	143
1 file changed, 121 insertions(+), 22 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index 3464be771f95..188feac04b60 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -369,38 +369,26 @@ xfs_bmbt_init_rec_from_cur(
xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}
-STATIC int64_t
-xfs_bmbt_key_diff(
+STATIC int
+xfs_bmbt_cmp_key_with_cur(
struct xfs_btree_cur *cur,
const union xfs_btree_key *key)
{
- return (int64_t)be64_to_cpu(key->bmbt.br_startoff) -
- cur->bc_rec.b.br_startoff;
+ return cmp_int(be64_to_cpu(key->bmbt.br_startoff),
+ cur->bc_rec.b.br_startoff);
}
-STATIC int64_t
-xfs_bmbt_diff_two_keys(
+STATIC int
+xfs_bmbt_cmp_two_keys(
struct xfs_btree_cur *cur,
const union xfs_btree_key *k1,
const union xfs_btree_key *k2,
const union xfs_btree_key *mask)
{
- uint64_t a = be64_to_cpu(k1->bmbt.br_startoff);
- uint64_t b = be64_to_cpu(k2->bmbt.br_startoff);
-
ASSERT(!mask || mask->bmbt.br_startoff);
- /*
- * Note: This routine previously casted a and b to int64 and subtracted
- * them to generate a result. This lead to problems if b was the
- * "maximum" key value (all ones) being signed incorrectly, hence this
- * somewhat less efficient version.
- */
- if (a > b)
- return 1;
- if (b > a)
- return -1;
- return 0;
+ return cmp_int(be64_to_cpu(k1->bmbt.br_startoff),
+ be64_to_cpu(k2->bmbt.br_startoff));
}
static xfs_failaddr_t
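
The switch from the old *_key_diff/*_diff_two_keys callbacks to the *_cmp_* callbacks replaces signed 64-bit subtraction with a three-way compare. The removed comment above explains why: if one key is the all-ones "maximum" value, casting it to int64_t flips its sign and the subtraction reports the wrong ordering. A minimal standalone illustration of that failure mode; the local cmp_int macro below only mirrors the kernel helper's ((a > b) - (a < b)) semantics and is defined here so the snippet compiles outside the kernel tree:

	#include <inttypes.h>
	#include <stdio.h>

	/* Local stand-in for the kernel's cmp_int(): returns -1, 0 or 1. */
	#define cmp_int(a, b)	(((a) > (b)) - ((a) < (b)))

	int main(void)
	{
		uint64_t small = 1;
		uint64_t max = ~0ULL;	/* "maximum" key: all ones */

		/* Old approach: signed subtraction misorders the maximum key. */
		int64_t diff = (int64_t)small - (int64_t)max;
		printf("subtraction says %s\n",
			diff > 0 ? "small > max (wrong)" : "small < max");

		/* Three-way compare stays correct over the full unsigned range. */
		printf("cmp_int says %d (expected -1)\n", cmp_int(small, max));
		return 0;
	}
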
@@ -516,6 +504,116 @@ xfs_bmbt_keys_contiguous(
be64_to_cpu(key2->bmbt.br_startoff));
}
+static inline void
+xfs_bmbt_move_ptrs(
+ struct xfs_mount *mp,
+ struct xfs_btree_block *broot,
+ short old_size,
+ size_t new_size,
+ unsigned int numrecs)
+{
+ void *dptr;
+ void *sptr;
+
+ sptr = xfs_bmap_broot_ptr_addr(mp, broot, 1, old_size);
+ dptr = xfs_bmap_broot_ptr_addr(mp, broot, 1, new_size);
+ memmove(dptr, sptr, numrecs * sizeof(xfs_bmbt_ptr_t));
+}
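
xfs_bmbt_move_ptrs() relies on the incore root layout: records sit immediately after the block header, while the pointer array starts at an offset derived from the buffer's allocated size, so a resize changes where the pointers must live but not where the records are. A toy, userspace-only model of that arithmetic; the layout, helper names and maxrecs formula below are simplifying assumptions, not the real xfs_bmap_broot_ptr_addr()/xfs_bmbt_maxrecs():

	#include <string.h>

	/*
	 * Toy model: [ header ][ rec 1 .. rec N ][ hole ][ ptr 1 .. ptr N ]
	 * The pointer array offset depends on how many records the
	 * *allocated* buffer could hold, so it shifts when the buffer is
	 * grown or shrunk, while the records stay behind the header.
	 */
	void *example_ptr_addr(void *broot, size_t hdr_size, size_t broot_bytes,
			       size_t rec_size, size_t ptr_size, unsigned int idx)
	{
		unsigned int maxrecs = (broot_bytes - hdr_size) /
				       (rec_size + ptr_size);

		return (char *)broot + hdr_size + maxrecs * rec_size +
				(idx - 1) * ptr_size;
	}

	void example_move_ptrs(void *broot, size_t hdr_size, size_t old_bytes,
			       size_t new_bytes, size_t rec_size, size_t ptr_size,
			       unsigned int numrecs)
	{
		void *sptr = example_ptr_addr(broot, hdr_size, old_bytes,
					      rec_size, ptr_size, 1);
		void *dptr = example_ptr_addr(broot, hdr_size, new_bytes,
					      rec_size, ptr_size, 1);

		/* memmove, because the old and new pointer arrays may overlap. */
		memmove(dptr, sptr, numrecs * ptr_size);
	}
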
+
+/*
+ * Reallocate the space for if_broot based on the number of records. Move the
+ * records and pointers in if_broot to fit the new size. When shrinking this
+ * will eliminate holes between the records and pointers created by the caller.
+ * When growing this will create holes to be filled in by the caller.
+ *
+ * The caller must not request to add more records than would fit in the
+ * on-disk inode root. If the if_broot is currently NULL, then if we are
+ * adding records, one will be allocated. The caller must also not request
+ * that the number of records go below zero, although it can go to zero.
+ *
+ * ip -- the inode whose if_broot area is changing
+ * whichfork -- which inode fork to change
+ * new_numrecs -- the new number of records requested for the if_broot array
+ *
+ * Returns the incore btree root block.
+ */
+struct xfs_btree_block *
+xfs_bmap_broot_realloc(
+ struct xfs_inode *ip,
+ int whichfork,
+ unsigned int new_numrecs)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
+ struct xfs_btree_block *broot;
+ unsigned int new_size;
+ unsigned int old_size = ifp->if_broot_bytes;
+
+ /*
+ * Block mapping btrees do not support storing zero records; if this
+ * happens, the fork is being changed to FMT_EXTENTS. Free the broot
+ * and get out.
+ */
+ if (new_numrecs == 0)
+ return xfs_broot_realloc(ifp, 0);
+
+ new_size = xfs_bmap_broot_space_calc(mp, new_numrecs);
+
+ /* Handle the nop case quietly. */
+ if (new_size == old_size)
+ return ifp->if_broot;
+
+ if (new_size > old_size) {
+ unsigned int old_numrecs;
+
+ /*
+ * If there wasn't any memory allocated before, just
+ * allocate it now and get out.
+ */
+ if (old_size == 0)
+ return xfs_broot_realloc(ifp, new_size);
+
+ /*
+ * If there is already an existing if_broot, then we need
+ * to realloc() it and shift the pointers to their new
+ * location. The records don't change location because
+ * they are kept butted up against the btree block header.
+ */
+ old_numrecs = xfs_bmbt_maxrecs(mp, old_size, false);
+ broot = xfs_broot_realloc(ifp, new_size);
+ ASSERT(xfs_bmap_bmdr_space(broot) <=
+ xfs_inode_fork_size(ip, whichfork));
+ xfs_bmbt_move_ptrs(mp, broot, old_size, new_size, old_numrecs);
+ return broot;
+ }
+
+ /*
+ * We're reducing, but not totally eliminating, numrecs. In this case,
+ * we are shrinking the if_broot buffer, so it must already exist.
+ */
+ ASSERT(ifp->if_broot != NULL && old_size > 0 && new_size > 0);
+
+ /*
+ * Shrink the btree root by moving the bmbt pointers, since they are
+ * not butted up against the btree block header, then reallocating
+ * broot.
+ */
+ xfs_bmbt_move_ptrs(mp, ifp->if_broot, old_size, new_size, new_numrecs);
+ broot = xfs_broot_realloc(ifp, new_size);
+ ASSERT(xfs_bmap_bmdr_space(broot) <=
+ xfs_inode_fork_size(ip, whichfork));
+ return broot;
+}
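
A point worth noting in the flow above: on grow, xfs_broot_realloc() runs first and the existing pointers are then pushed out to their offset in the larger buffer; on shrink, the surviving pointers are pulled in first, while the buffer is still large enough to address both locations, and only then is the buffer trimmed. A hypothetical caller growing the root by one record might look like the sketch below; the call site and the numrecs bookkeeping are illustrative assumptions, only xfs_bmap_broot_realloc() itself comes from this patch:

	/*
	 * Hypothetical caller sketch (not a real XFS call site): grow the
	 * incore root from numrecs to numrecs + 1 entries before adding a
	 * new key/pointer pair.  Filling the new key and pointer slots is
	 * shown as comments, since those accessors are not part of this hunk.
	 */
	struct xfs_btree_block	*broot;

	broot = xfs_bmap_broot_realloc(ip, whichfork, numrecs + 1);
	/* write the new key into key slot numrecs + 1 */
	/* write the new block pointer into ptr slot numrecs + 1 */
	xfs_btree_set_numrecs(broot, numrecs + 1);
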
+
+static struct xfs_btree_block *
+xfs_bmbt_broot_realloc(
+ struct xfs_btree_cur *cur,
+ unsigned int new_numrecs)
+{
+ return xfs_bmap_broot_realloc(cur->bc_ino.ip, cur->bc_ino.whichfork,
+ new_numrecs);
+}
+
const struct xfs_btree_ops xfs_bmbt_ops = {
.name = "bmap",
.type = XFS_BTREE_TYPE_INODE,
@@ -537,12 +635,13 @@ const struct xfs_btree_ops xfs_bmbt_ops = {
.init_key_from_rec = xfs_bmbt_init_key_from_rec,
.init_high_key_from_rec = xfs_bmbt_init_high_key_from_rec,
.init_rec_from_cur = xfs_bmbt_init_rec_from_cur,
- .key_diff = xfs_bmbt_key_diff,
- .diff_two_keys = xfs_bmbt_diff_two_keys,
+ .cmp_key_with_cur = xfs_bmbt_cmp_key_with_cur,
+ .cmp_two_keys = xfs_bmbt_cmp_two_keys,
.buf_ops = &xfs_bmbt_buf_ops,
.keys_inorder = xfs_bmbt_keys_inorder,
.recs_inorder = xfs_bmbt_recs_inorder,
.keys_contiguous = xfs_bmbt_keys_contiguous,
+ .broot_realloc = xfs_bmbt_broot_realloc,
};
/*