Diffstat (limited to 'fs/xfs/libxfs')
-rw-r--r-- | fs/xfs/libxfs/xfs_alloc.c      |  50
-rw-r--r-- | fs/xfs/libxfs/xfs_alloc.h      |   3
-rw-r--r-- | fs/xfs/libxfs/xfs_attr.c       |  22
-rw-r--r-- | fs/xfs/libxfs/xfs_bmap.c       | 315
-rw-r--r-- | fs/xfs/libxfs/xfs_btree.c      |  33
-rw-r--r-- | fs/xfs/libxfs/xfs_dir2.h       |   2
-rw-r--r-- | fs/xfs/libxfs/xfs_dir2_sf.c    |   2
-rw-r--r-- | fs/xfs/libxfs/xfs_errortag.h   |   6
-rw-r--r-- | fs/xfs/libxfs/xfs_fs.h         |   1
-rw-r--r-- | fs/xfs/libxfs/xfs_inode_fork.c |  27
-rw-r--r-- | fs/xfs/libxfs/xfs_inode_fork.h |  63
-rw-r--r-- | fs/xfs/libxfs/xfs_sb.c         |   2
12 files changed, 393 insertions, 133 deletions
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 7cb9f064ac64..0c623d3c1036 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -2474,6 +2474,47 @@ xfs_defer_agfl_block(
 	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
 }
 
+#ifdef DEBUG
+/*
+ * Check if an AGF has a free extent record whose length is equal to
+ * args->minlen.
+ */
+STATIC int
+xfs_exact_minlen_extent_available(
+	struct xfs_alloc_arg	*args,
+	struct xfs_buf		*agbp,
+	int			*stat)
+{
+	struct xfs_btree_cur	*cnt_cur;
+	xfs_agblock_t		fbno;
+	xfs_extlen_t		flen;
+	int			error = 0;
+
+	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, agbp,
+			args->agno, XFS_BTNUM_CNT);
+	error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
+	if (error)
+		goto out;
+
+	if (*stat == 0) {
+		error = -EFSCORRUPTED;
+		goto out;
+	}
+
+	error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, stat);
+	if (error)
+		goto out;
+
+	if (*stat == 1 && flen != args->minlen)
+		*stat = 0;
+
+out:
+	xfs_btree_del_cursor(cnt_cur, error);
+
+	return error;
+}
+#endif
+
 /*
  * Decide whether to use this allocation group for this allocation.
  * If so, fix up the btree freelist's size.
@@ -2545,6 +2586,15 @@ xfs_alloc_fix_freelist(
 	if (!xfs_alloc_space_available(args, need, flags))
 		goto out_agbp_relse;
 
+#ifdef DEBUG
+	if (args->alloc_minlen_only) {
+		int stat;
+
+		error = xfs_exact_minlen_extent_available(args, agbp, &stat);
+		if (error || !stat)
+			goto out_agbp_relse;
+	}
+#endif
 	/*
 	 * Make the freelist shorter if it's too long.
 	 *
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 6c22b12176b8..a4427c5775c2 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -75,6 +75,9 @@ typedef struct xfs_alloc_arg {
 	char		wasfromfl;	/* set if allocation is from freelist */
 	struct xfs_owner_info	oinfo;	/* owner of blocks being allocated */
 	enum xfs_ag_resv_type	resv;	/* block reservation to use */
+#ifdef DEBUG
+	bool		alloc_minlen_only; /* allocate exact minlen extent */
+#endif
 } xfs_alloc_arg_t;
 
 /*
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index fd8e6418a0d3..472b3039eabb 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -396,6 +396,7 @@ xfs_attr_set(
 	struct xfs_trans_res	tres;
 	bool			rsvd = (args->attr_filter & XFS_ATTR_ROOT);
 	int			error, local;
+	int			rmt_blks = 0;
 	unsigned int		total;
 
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
@@ -442,34 +443,33 @@ xfs_attr_set(
 		tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
 		tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
 		total = args->total;
+
+		if (!local)
+			rmt_blks = xfs_attr3_rmt_blocks(mp, args->valuelen);
 	} else {
 		XFS_STATS_INC(mp, xs_attr_remove);
 		tres = M_RES(mp)->tr_attrrm;
 		total = XFS_ATTRRM_SPACE_RES(mp);
+		rmt_blks = xfs_attr3_rmt_blocks(mp, XFS_XATTR_SIZE_MAX);
 	}
 
 	/*
 	 * Root fork attributes can use reserved data blocks for this
 	 * operation if necessary
 	 */
-	error = xfs_trans_alloc(mp, &tres, total, 0,
-			rsvd ? XFS_TRANS_RESERVE : 0, &args->trans);
+	error = xfs_trans_alloc_inode(dp, &tres, total, 0, rsvd, &args->trans);
 	if (error)
 		return error;
 
-	xfs_ilock(dp, XFS_ILOCK_EXCL);
-	xfs_trans_ijoin(args->trans, dp, 0);
-	if (args->value) {
-		unsigned int	quota_flags = XFS_QMOPT_RES_REGBLKS;
-
-		if (rsvd)
-			quota_flags |= XFS_QMOPT_FORCE_RES;
-		error = xfs_trans_reserve_quota_nblks(args->trans, dp,
-				args->total, 0, quota_flags);
+	if (args->value || xfs_inode_hasattr(dp)) {
+		error = xfs_iext_count_may_overflow(dp, XFS_ATTR_FORK,
+				XFS_IEXT_ATTR_MANIP_CNT(rmt_blks));
 		if (error)
 			goto out_trans_cancel;
+	}
 
+	if (args->value) {
 		error = xfs_has_attr(args);
 		if (error == -EEXIST && (args->attr_flags & XATTR_CREATE))
 			goto out_trans_cancel;
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index bc446418e227..e0905ad171f0 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1079,21 +1079,13 @@ xfs_bmap_add_attrfork(
 
 	blks = XFS_ADDAFORK_SPACE_RES(mp);
 
-	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
-			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
+	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_addafork, blks, 0,
+			rsvd, &tp);
 	if (error)
 		return error;
-
-	xfs_ilock(ip, XFS_ILOCK_EXCL);
-	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
-			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
-			XFS_QMOPT_RES_REGBLKS);
-	if (error)
-		goto trans_cancel;
 	if (XFS_IFORK_Q(ip))
 		goto trans_cancel;
 
-	xfs_trans_ijoin(tp, ip, 0);
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 	error = xfs_bmap_set_attrforkoff(ip, size, &version);
 	if (error)
@@ -3463,34 +3455,16 @@ xfs_bmap_btalloc_accounting(
 			args->len);
 }
 
-STATIC int
-xfs_bmap_btalloc(
-	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
+static int
+xfs_bmap_compute_alignments(
+	struct xfs_bmalloca	*ap,
+	struct xfs_alloc_arg	*args)
 {
-	xfs_mount_t	*mp;		/* mount point structure */
-	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
-	xfs_extlen_t	align = 0;	/* minimum allocation alignment */
-	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
-	xfs_agnumber_t	ag;
-	xfs_alloc_arg_t	args;
-	xfs_fileoff_t	orig_offset;
-	xfs_extlen_t	orig_length;
-	xfs_extlen_t	blen;
-	xfs_extlen_t	nextminlen = 0;
-	int		nullfb;		/* true if ap->firstblock isn't set */
-	int		isaligned;
-	int		tryagain;
-	int		error;
-	int		stripe_align;
-
-	ASSERT(ap->length);
-	orig_offset = ap->offset;
-	orig_length = ap->length;
-
-	mp = ap->ip->i_mount;
+	struct xfs_mount	*mp = args->mp;
+	xfs_extlen_t		align = 0; /* minimum allocation alignment */
+	int			stripe_align = 0;
 
 	/* stripe alignment for allocation is determined by mount parameters */
-	stripe_align = 0;
 	if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
 		stripe_align = mp->m_swidth;
 	else if (mp->m_dalign)
@@ -3501,13 +3475,171 @@ xfs_bmap_btalloc(
 	else if (ap->datatype & XFS_ALLOC_USERDATA)
 		align = xfs_get_extsz_hint(ap->ip);
 	if (align) {
-		error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
-						align, 0, ap->eof, 0, ap->conv,
-						&ap->offset, &ap->length);
-		ASSERT(!error);
+		if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
+					ap->eof, 0, ap->conv, &ap->offset,
+					&ap->length))
+			ASSERT(0);
 		ASSERT(ap->length);
 	}
 
+	/* apply extent size hints if obtained earlier */
+	if (align) {
+		args->prod = align;
+		div_u64_rem(ap->offset, args->prod, &args->mod);
+		if (args->mod)
+			args->mod = args->prod - args->mod;
+	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
+		args->prod = 1;
+		args->mod = 0;
+	} else {
+		args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
+		div_u64_rem(ap->offset, args->prod, &args->mod);
+		if (args->mod)
+			args->mod = args->prod - args->mod;
+	}
+
+	return stripe_align;
+}
+
+static void
+xfs_bmap_process_allocated_extent(
+	struct xfs_bmalloca	*ap,
+	struct xfs_alloc_arg	*args,
+	xfs_fileoff_t		orig_offset,
+	xfs_extlen_t		orig_length)
+{
+	int			nullfb;
+
+	nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
+
+	/*
+	 * check the allocation happened at the same or higher AG than
+	 * the first block that was allocated.
+	 */
+	ASSERT(nullfb ||
+		XFS_FSB_TO_AGNO(args->mp, ap->tp->t_firstblock) <=
+		XFS_FSB_TO_AGNO(args->mp, args->fsbno));
+
+	ap->blkno = args->fsbno;
+	if (nullfb)
+		ap->tp->t_firstblock = args->fsbno;
+	ap->length = args->len;
+	/*
+	 * If the extent size hint is active, we tried to round the
+	 * caller's allocation request offset down to extsz and the
+	 * length up to another extsz boundary.  If we found a free
+	 * extent we mapped it in starting at this new offset.  If the
+	 * newly mapped space isn't long enough to cover any of the
+	 * range of offsets that was originally requested, move the
+	 * mapping up so that we can fill as much of the caller's
+	 * original request as possible.  Free space is apparently
+	 * very fragmented so we're unlikely to be able to satisfy the
+	 * hints anyway.
+	 */
+	if (ap->length <= orig_length)
+		ap->offset = orig_offset;
+	else if (ap->offset + ap->length < orig_offset + orig_length)
+		ap->offset = orig_offset + orig_length - ap->length;
+	xfs_bmap_btalloc_accounting(ap, args);
+}
+
+#ifdef DEBUG
+static int
+xfs_bmap_exact_minlen_extent_alloc(
+	struct xfs_bmalloca	*ap)
+{
+	struct xfs_mount	*mp = ap->ip->i_mount;
+	struct xfs_alloc_arg	args = { .tp = ap->tp, .mp = mp };
+	xfs_fileoff_t		orig_offset;
+	xfs_extlen_t		orig_length;
+	int			error;
+
+	ASSERT(ap->length);
+
+	if (ap->minlen != 1) {
+		ap->blkno = NULLFSBLOCK;
+		ap->length = 0;
+		return 0;
+	}
+
+	orig_offset = ap->offset;
+	orig_length = ap->length;
+
+	args.alloc_minlen_only = 1;
+
+	xfs_bmap_compute_alignments(ap, &args);
+
+	if (ap->tp->t_firstblock == NULLFSBLOCK) {
+		/*
+		 * Unlike the longest extent available in an AG, we don't track
+		 * the length of an AG's shortest extent.
+		 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
+		 * hence we can afford to start traversing from the 0th AG since
+		 * we need not be concerned about a drop in performance in
+		 * "debug only" code paths.
+		 */
+		ap->blkno = XFS_AGB_TO_FSB(mp, 0, 0);
+	} else {
+		ap->blkno = ap->tp->t_firstblock;
+	}
+
+	args.fsbno = ap->blkno;
+	args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
+	args.type = XFS_ALLOCTYPE_FIRST_AG;
+	args.total = args.minlen = args.maxlen = ap->minlen;
+
+	args.alignment = 1;
+	args.minalignslop = 0;
+
+	args.minleft = ap->minleft;
+	args.wasdel = ap->wasdel;
+	args.resv = XFS_AG_RESV_NONE;
+	args.datatype = ap->datatype;
+
+	error = xfs_alloc_vextent(&args);
+	if (error)
+		return error;
+
+	if (args.fsbno != NULLFSBLOCK) {
+		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
+			orig_length);
+	} else {
+		ap->blkno = NULLFSBLOCK;
+		ap->length = 0;
+	}
+
+	return 0;
+}
+#else
+
+#define xfs_bmap_exact_minlen_extent_alloc(bma) (-EFSCORRUPTED)
+
+#endif
+
+STATIC int
+xfs_bmap_btalloc(
+	struct xfs_bmalloca	*ap)
+{
+	struct xfs_mount	*mp = ap->ip->i_mount;
+	struct xfs_alloc_arg	args = { .tp = ap->tp, .mp = mp };
+	xfs_alloctype_t		atype = 0;
+	xfs_agnumber_t		fb_agno;	/* ag number of ap->firstblock */
+	xfs_agnumber_t		ag;
+	xfs_fileoff_t		orig_offset;
+	xfs_extlen_t		orig_length;
+	xfs_extlen_t		blen;
+	xfs_extlen_t		nextminlen = 0;
+	int			nullfb;	/* true if ap->firstblock isn't set */
+	int			isaligned;
+	int			tryagain;
+	int			error;
+	int			stripe_align;
+
+	ASSERT(ap->length);
+	orig_offset = ap->offset;
+	orig_length = ap->length;
+
+	stripe_align = xfs_bmap_compute_alignments(ap, &args);
 
 	nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
 	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
@@ -3538,9 +3670,6 @@ xfs_bmap_btalloc(
 	 * Normal allocation, done through xfs_alloc_vextent.
 	 */
 	tryagain = isaligned = 0;
-	memset(&args, 0, sizeof(args));
-	args.tp = ap->tp;
-	args.mp = mp;
 	args.fsbno = ap->blkno;
 	args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
 
@@ -3571,21 +3700,7 @@ xfs_bmap_btalloc(
 		args.total = ap->total;
 		args.minlen = ap->minlen;
 	}
-	/* apply extent size hints if obtained earlier */
-	if (align) {
-		args.prod = align;
-		div_u64_rem(ap->offset, args.prod, &args.mod);
-		if (args.mod)
-			args.mod = args.prod - args.mod;
-	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
-		args.prod = 1;
-		args.mod = 0;
-	} else {
-		args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
-		div_u64_rem(ap->offset, args.prod, &args.mod);
-		if (args.mod)
-			args.mod = args.prod - args.mod;
-	}
+
 	/*
 	 * If we are not low on available data blocks, and the underlying
 	 * logical volume manager is a stripe, and the file offset is zero then
@@ -3687,37 +3802,10 @@ xfs_bmap_btalloc(
 			return error;
 		ap->tp->t_flags |= XFS_TRANS_LOWMODE;
 	}
+
 	if (args.fsbno != NULLFSBLOCK) {
-		/*
-		 * check the allocation happened at the same or higher AG than
-		 * the first block that was allocated.
-		 */
-		ASSERT(ap->tp->t_firstblock == NULLFSBLOCK ||
-		       XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <=
-		       XFS_FSB_TO_AGNO(mp, args.fsbno));
-
-		ap->blkno = args.fsbno;
-		if (ap->tp->t_firstblock == NULLFSBLOCK)
-			ap->tp->t_firstblock = args.fsbno;
-		ASSERT(nullfb || fb_agno <= args.agno);
-		ap->length = args.len;
-		/*
-		 * If the extent size hint is active, we tried to round the
-		 * caller's allocation request offset down to extsz and the
-		 * length up to another extsz boundary.  If we found a free
-		 * extent we mapped it in starting at this new offset.  If the
-		 * newly mapped space isn't long enough to cover any of the
-		 * range of offsets that was originally requested, move the
-		 * mapping up so that we can fill as much of the caller's
-		 * original request as possible.  Free space is apparently
-		 * very fragmented so we're unlikely to be able to satisfy the
-		 * hints anyway.
-		 */
-		if (ap->length <= orig_length)
-			ap->offset = orig_offset;
-		else if (ap->offset + ap->length < orig_offset + orig_length)
-			ap->offset = orig_offset + orig_length - ap->length;
-		xfs_bmap_btalloc_accounting(ap, &args);
+		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
+			orig_length);
 	} else {
 		ap->blkno = NULLFSBLOCK;
 		ap->length = 0;
@@ -4001,8 +4089,7 @@ xfs_bmapi_reserve_delalloc(
 	 * blocks. This number gets adjusted later. We return if we haven't
 	 * allocated blocks already inside this loop.
 	 */
-	error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
-			XFS_QMOPT_RES_REGBLKS);
+	error = xfs_quota_reserve_blkres(ip, alen);
 	if (error)
 		return error;
 
@@ -4048,8 +4135,7 @@ out_unreserve_blocks:
 	xfs_mod_fdblocks(mp, alen, false);
 out_unreserve_quota:
 	if (XFS_IS_QUOTA_ON(mp))
-		xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
-				XFS_QMOPT_RES_REGBLKS);
+		xfs_quota_unreserve_blkres(ip, alen);
 	return error;
 }
 
@@ -4083,6 +4169,10 @@ xfs_bmap_alloc_userdata(
 		return xfs_bmap_rtalloc(bma);
 	}
 
+	if (unlikely(XFS_TEST_ERROR(false, mp,
+			XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
+		return xfs_bmap_exact_minlen_extent_alloc(bma);
+
 	return xfs_bmap_btalloc(bma);
 }
 
@@ -4119,10 +4209,15 @@ xfs_bmapi_allocate(
 	else
 		bma->minlen = 1;
 
-	if (bma->flags & XFS_BMAPI_METADATA)
-		error = xfs_bmap_btalloc(bma);
-	else
+	if (bma->flags & XFS_BMAPI_METADATA) {
+		if (unlikely(XFS_TEST_ERROR(false, mp,
+				XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
+			error = xfs_bmap_exact_minlen_extent_alloc(bma);
+		else
+			error = xfs_bmap_btalloc(bma);
+	} else {
 		error = xfs_bmap_alloc_userdata(bma);
+	}
 	if (error || bma->blkno == NULLFSBLOCK)
 		return error;
 
@@ -4527,6 +4622,12 @@ xfs_bmapi_convert_delalloc(
 		return error;
 
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+	error = xfs_iext_count_may_overflow(ip, whichfork,
+			XFS_IEXT_ADD_NOSPLIT_CNT);
+	if (error)
+		goto out_trans_cancel;
+
 	xfs_trans_ijoin(tp, ip, 0);
 
 	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
@@ -4826,9 +4927,8 @@ xfs_bmap_del_extent_delay(
 	 * sb counters as we might have to borrow some blocks for the
 	 * indirect block accounting.
	 */
-	error = xfs_trans_reserve_quota_nblks(NULL, ip,
-			-((long)del->br_blockcount), 0,
-			isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+	ASSERT(!isrt);
+	error = xfs_quota_unreserve_blkres(ip, del->br_blockcount);
 	if (error)
 		return error;
 	ip->i_delayed_blks -= del->br_blockcount;
@@ -5145,6 +5245,27 @@ xfs_bmap_del_extent_real(
 		/*
 		 * Deleting the middle of the extent.
 		 */
+
+		/*
+		 * For directories, -ENOSPC is returned since a directory entry
+		 * remove operation must not fail due to low extent count
+		 * availability. -ENOSPC will be handled by higher layers of XFS
+		 * by letting the corresponding empty Data/Free blocks to linger
+		 * until a future remove operation. Dabtree blocks would be
+		 * swapped with the last block in the leaf space and then the
+		 * new last block will be unmapped.
+		 *
+		 * The above logic also applies to the source directory entry of
+		 * a rename operation.
+		 */
+		error = xfs_iext_count_may_overflow(ip, whichfork, 1);
+		if (error) {
+			ASSERT(S_ISDIR(VFS_I(ip)->i_mode) &&
+			       whichfork == XFS_DATA_FORK);
+			error = -ENOSPC;
+			goto done;
+		}
+
 		old = got;
 
 		got.br_blockcount = del->br_startoff - got.br_startoff;
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index c4d7a9241dc3..b56ff451adce 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -353,20 +353,17 @@ xfs_btree_free_block(
  */
 void
 xfs_btree_del_cursor(
-	xfs_btree_cur_t	*cur,		/* btree cursor */
-	int		error)		/* del because of error */
+	struct xfs_btree_cur	*cur,		/* btree cursor */
+	int			error)		/* del because of error */
 {
-	int		i;		/* btree level */
+	int			i;		/* btree level */
 
 	/*
-	 * Clear the buffer pointers, and release the buffers.
-	 * If we're doing this in the face of an error, we
-	 * need to make sure to inspect all of the entries
-	 * in the bc_bufs array for buffers to be unlocked.
-	 * This is because some of the btree code works from
-	 * level n down to 0, and if we get an error along
-	 * the way we won't have initialized all the entries
-	 * down to 0.
+	 * Clear the buffer pointers and release the buffers. If we're doing
+	 * this because of an error, inspect all of the entries in the bc_bufs
+	 * array for buffers to be unlocked. This is because some of the btree
+	 * code works from level n down to 0, and if we get an error along the
+	 * way we won't have initialized all the entries down to 0.
 	 */
 	for (i = 0; i < cur->bc_nlevels; i++) {
 		if (cur->bc_bufs[i])
@@ -374,17 +371,11 @@ xfs_btree_del_cursor(
 		else if (!error)
 			break;
 	}
-	/*
-	 * Can't free a bmap cursor without having dealt with the
-	 * allocated indirect blocks' accounting.
-	 */
-	ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP ||
-	       cur->bc_ino.allocated == 0);
-	/*
-	 * Free the cursor.
-	 */
+
+	ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_ino.allocated == 0 ||
+	       XFS_FORCED_SHUTDOWN(cur->bc_mp));
 	if (unlikely(cur->bc_flags & XFS_BTREE_STAGING))
-		kmem_free((void *)cur->bc_ops);
+		kmem_free(cur->bc_ops);
 	kmem_cache_free(xfs_btree_cur_zone, cur);
 }
 
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index e55378640b05..d03e6098ded9 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -47,8 +47,6 @@ extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp,
 extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
 				struct xfs_name *name, xfs_ino_t ino,
 				xfs_extlen_t tot);
-extern bool xfs_dir2_sf_replace_needblock(struct xfs_inode *dp,
-				xfs_ino_t inum);
 extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
 				struct xfs_name *name, xfs_ino_t inum,
 				xfs_extlen_t tot);
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 2463b5d73447..8c4f76bba88b 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -1018,7 +1018,7 @@ xfs_dir2_sf_removename(
 /*
  * Check whether the sf dir replace operation need more blocks.
  */
-bool
+static bool
 xfs_dir2_sf_replace_needblock(
 	struct xfs_inode	*dp,
 	xfs_ino_t		inum)
diff --git a/fs/xfs/libxfs/xfs_errortag.h b/fs/xfs/libxfs/xfs_errortag.h
index 53b305dea381..6ca9084b6934 100644
--- a/fs/xfs/libxfs/xfs_errortag.h
+++ b/fs/xfs/libxfs/xfs_errortag.h
@@ -56,7 +56,9 @@
 #define XFS_ERRTAG_FORCE_SUMMARY_RECALC			33
 #define XFS_ERRTAG_IUNLINK_FALLBACK			34
 #define XFS_ERRTAG_BUF_IOERROR				35
-#define XFS_ERRTAG_MAX					36
+#define XFS_ERRTAG_REDUCE_MAX_IEXTENTS			36
+#define XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT		37
+#define XFS_ERRTAG_MAX					38
 
 /*
  * Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
@@ -97,5 +99,7 @@
 #define XFS_RANDOM_FORCE_SUMMARY_RECALC			1
 #define XFS_RANDOM_IUNLINK_FALLBACK			(XFS_RANDOM_DEFAULT/10)
 #define XFS_RANDOM_BUF_IOERROR				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_REDUCE_MAX_IEXTENTS			1
+#define XFS_RANDOM_BMAP_ALLOC_MINLEN_EXTENT		1
 
 #endif /* __XFS_ERRORTAG_H_ */
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 2a2e3cfd94f0..6fad140d4c8e 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -250,6 +250,7 @@ typedef struct xfs_fsop_resblks {
 #define XFS_FSOP_GEOM_FLAGS_RMAPBT	(1 << 19) /* reverse mapping btree */
 #define XFS_FSOP_GEOM_FLAGS_REFLINK	(1 << 20) /* files can share blocks */
 #define XFS_FSOP_GEOM_FLAGS_BIGTIME	(1 << 21) /* 64-bit nsec timestamps */
+#define XFS_FSOP_GEOM_FLAGS_INOBTCNT	(1 << 22) /* inobt btree counter */
 
 /*
  * Minimum and maximum sizes need for growth checks.
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 7575de5cecb1..e080d7e07643 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -23,6 +23,8 @@
 #include "xfs_da_btree.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_attr_leaf.h"
+#include "xfs_types.h"
+#include "xfs_errortag.h"
 
 kmem_zone_t *xfs_ifork_zone;
 
@@ -728,3 +730,28 @@ xfs_ifork_verify_local_attr(
 
 	return 0;
 }
+
+int
+xfs_iext_count_may_overflow(
+	struct xfs_inode	*ip,
+	int			whichfork,
+	int			nr_to_add)
+{
+	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
+	uint64_t		max_exts;
+	uint64_t		nr_exts;
+
+	if (whichfork == XFS_COW_FORK)
+		return 0;
+
+	max_exts = (whichfork == XFS_ATTR_FORK) ? MAXAEXTNUM : MAXEXTNUM;
+
+	if (XFS_TEST_ERROR(false, ip->i_mount, XFS_ERRTAG_REDUCE_MAX_IEXTENTS))
+		max_exts = 10;
+
+	nr_exts = ifp->if_nextents + nr_to_add;
+	if (nr_exts < ifp->if_nextents || nr_exts > max_exts)
+		return -EFBIG;
+
+	return 0;
+}
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index a4953e95c4f3..9e2137cd7372 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -35,6 +35,67 @@ struct xfs_ifork {
 #define	XFS_IFBROOT	0x04	/* i_broot points to the bmap b-tree root */
 
 /*
+ * Worst-case increase in the fork extent count when we're adding a single
+ * extent to a fork and there's no possibility of splitting an existing mapping.
+ */
+#define XFS_IEXT_ADD_NOSPLIT_CNT	(1)
+
+/*
+ * Punching out an extent from the middle of an existing extent can cause the
+ * extent count to increase by 1.
+ *      i.e. | Old extent | Hole | Old extent |
+ */
+#define XFS_IEXT_PUNCH_HOLE_CNT		(1)
+
+/*
+ * Directory entry addition can cause the following,
+ * 1. Data block can be added/removed.
+ *    A new extent can cause extent count to increase by 1.
+ * 2. Free disk block can be added/removed.
+ *    Same behaviour as described above for Data block.
+ * 3. Dabtree blocks.
+ *    XFS_DA_NODE_MAXDEPTH blocks can be added. Each of these can be new
+ *    extents. Hence extent count can increase by XFS_DA_NODE_MAXDEPTH.
+ */
+#define XFS_IEXT_DIR_MANIP_CNT(mp) \
+	((XFS_DA_NODE_MAXDEPTH + 1 + 1) * (mp)->m_dir_geo->fsbcount)
+
+/*
+ * Adding/removing an xattr can cause XFS_DA_NODE_MAXDEPTH extents to
+ * be added. One extra extent for dabtree in case a local attr is
+ * large enough to cause a double split.  It can also cause extent
+ * count to increase proportional to the size of a remote xattr's
+ * value.
+ */
+#define XFS_IEXT_ATTR_MANIP_CNT(rmt_blks) \
+	(XFS_DA_NODE_MAXDEPTH + max(1, rmt_blks))
+
+/*
+ * A write to a sub-interval of an existing unwritten extent causes the original
+ * extent to be split into 3 extents
+ * i.e. | Unwritten | Real | Unwritten |
+ * Hence extent count can increase by 2.
+ */
+#define XFS_IEXT_WRITE_UNWRITTEN_CNT	(2)
+
+
+/*
+ * Moving an extent to data fork can cause a sub-interval of an existing extent
+ * to be unmapped. This will increase extent count by 1. Mapping in the new
+ * extent can increase the extent count by 1 again i.e.
+ *         | Old extent | New extent | Old extent |
+ * Hence number of extents increases by 2.
+ */
+#define XFS_IEXT_REFLINK_END_COW_CNT	(2)
+
+/*
+ * Removing an initial range of source/donor file's extent and adding a new
+ * extent (from donor/source file) in its place will cause extent count to
+ * increase by 1.
+ */
+#define XFS_IEXT_SWAP_RMAP_CNT		(1)
+
+/*
  * Fork handling.
  */
 
@@ -172,5 +233,7 @@ extern void xfs_ifork_init_cow(struct xfs_inode *ip);
 
 int xfs_ifork_verify_local_data(struct xfs_inode *ip);
 int xfs_ifork_verify_local_attr(struct xfs_inode *ip);
+int xfs_iext_count_may_overflow(struct xfs_inode *ip, int whichfork,
+		int nr_to_add);
 
 #endif /* __XFS_INODE_FORK_H__ */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index bbda117e5d85..60e6d255e5e2 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -1138,6 +1138,8 @@ xfs_fs_geometry(
 		geo->flags |= XFS_FSOP_GEOM_FLAGS_REFLINK;
 	if (xfs_sb_version_hasbigtime(sbp))
 		geo->flags |= XFS_FSOP_GEOM_FLAGS_BIGTIME;
+	if (xfs_sb_version_hasinobtcounts(sbp))
+		geo->flags |= XFS_FSOP_GEOM_FLAGS_INOBTCNT;
 	if (xfs_sb_version_hassector(sbp))
 		geo->logsectsize = sbp->sb_logsectsize;
 	else