Diffstat (limited to 'fs/xfs/xfs_qm.c')
 fs/xfs/xfs_qm.c | 187 ++++++++++++++++++++++++++---------------------------
 1 file changed, 90 insertions(+), 97 deletions(-)
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 61ee110b47d7..23ba84ec919a 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -134,6 +134,7 @@ xfs_qm_dqpurge(
dqp->q_flags |= XFS_DQFLAG_FREEING;
+ xfs_qm_dqunpin_wait(dqp);
xfs_dqflock(dqp);
/*
@@ -148,17 +149,29 @@ xfs_qm_dqpurge(
* We don't care about getting disk errors here. We need
* to purge this dquot anyway, so we go ahead regardless.
*/
- error = xfs_qm_dqflush(dqp, &bp);
+ error = xfs_dquot_use_attached_buf(dqp, &bp);
+ if (error == -EAGAIN) {
+ xfs_dqfunlock(dqp);
+ dqp->q_flags &= ~XFS_DQFLAG_FREEING;
+ goto out_unlock;
+ }
+ if (!bp)
+ goto out_funlock;
+
+ /*
+ * dqflush completes dqflock on error, and the bwrite ioend
+ * does it on success.
+ */
+ error = xfs_qm_dqflush(dqp, bp);
if (!error) {
error = xfs_bwrite(bp);
xfs_buf_relse(bp);
- } else if (error == -EAGAIN) {
- dqp->q_flags &= ~XFS_DQFLAG_FREEING;
- goto out_unlock;
}
xfs_dqflock(dqp);
}
+ xfs_dquot_detach_buf(dqp);
+out_funlock:
ASSERT(atomic_read(&dqp->q_pincount) == 0);
ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
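Condensing the hunk above: the purge path now waits for the dquot to be unpinned, takes the flush lock, and flushes into the buffer already attached to the dquot instead of reading it back from disk. The following is a simplified, non-authoritative sketch of that control flow, not the verbatim function; the radix-tree walk, the dquot lock, and the FREEING flag handling are omitted, and every helper named here is one that appears in the hunk.

/* Sketch only: condensed purge-time flush flow after this change. */
static int xfs_qm_dqpurge_flush_sketch(struct xfs_dquot *dqp)
{
	struct xfs_buf	*bp = NULL;
	int		error;

	xfs_qm_dqunpin_wait(dqp);	/* wait until the log has unpinned the dquot */
	xfs_dqflock(dqp);		/* take the flush lock */

	if (XFS_DQ_IS_DIRTY(dqp)) {
		error = xfs_dquot_use_attached_buf(dqp, &bp);
		if (error == -EAGAIN) {
			/* buffer unavailable right now: back off, retry the purge later */
			xfs_dqfunlock(dqp);
			return -EAGAIN;
		}
		if (!bp)
			goto out_funlock;

		/* dqflush drops the flush lock on error, the bwrite ioend on success */
		error = xfs_qm_dqflush(dqp, bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
		xfs_dquot_detach_buf(dqp);
	}
out_funlock:
	xfs_dqfunlock(dqp);
	return 0;
}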
@@ -218,10 +231,10 @@ xfs_qm_unmount_rt(
if (!rtg)
return;
- if (rtg->rtg_inodes[XFS_RTGI_BITMAP])
- xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_BITMAP]);
- if (rtg->rtg_inodes[XFS_RTGI_SUMMARY])
- xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_SUMMARY]);
+ if (rtg_bitmap(rtg))
+ xfs_qm_dqdetach(rtg_bitmap(rtg));
+ if (rtg_summary(rtg))
+ xfs_qm_dqdetach(rtg_summary(rtg));
xfs_rtgroup_rele(rtg);
}
@@ -241,6 +254,10 @@ xfs_qm_destroy_quotainos(
xfs_irele(qi->qi_pquotaip);
qi->qi_pquotaip = NULL;
}
+ if (qi->qi_dirip) {
+ xfs_irele(qi->qi_dirip);
+ qi->qi_dirip = NULL;
+ }
}
/*
@@ -412,6 +429,8 @@ void
xfs_qm_dqdetach(
xfs_inode_t *ip)
{
+ if (xfs_is_metadir_inode(ip))
+ return;
if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
return;
@@ -447,6 +466,7 @@ xfs_qm_dquot_isolate(
struct xfs_dquot *dqp = container_of(item,
struct xfs_dquot, q_lru);
struct xfs_qm_isolate *isol = arg;
+ enum lru_status ret = LRU_SKIP;
if (!xfs_dqlock_nowait(dqp))
goto out_miss_busy;
@@ -460,6 +480,16 @@ xfs_qm_dquot_isolate(
goto out_miss_unlock;
/*
+ * If the dquot is pinned or dirty, rotate it to the end of the LRU to
+ * give some time for it to be cleaned before we try to isolate it
+ * again.
+ */
+ ret = LRU_ROTATE;
+ if (XFS_DQ_IS_DIRTY(dqp) || atomic_read(&dqp->q_pincount) > 0) {
+ goto out_miss_unlock;
+ }
+
+ /*
* This dquot has acquired a reference in the meantime; remove it from
* the freelist and try again.
*/
@@ -474,30 +504,15 @@ xfs_qm_dquot_isolate(
}
/*
- * If the dquot is dirty, flush it. If it's already being flushed, just
- * skip it so there is time for the IO to complete before we try to
- * reclaim it again on the next LRU pass.
+ * The dquot may still be under IO, in which case the flush lock will be
+ * held. If we can't get the flush lock now, just skip over the dquot as
+ * if it was dirty.
*/
if (!xfs_dqflock_nowait(dqp))
goto out_miss_unlock;
- if (XFS_DQ_IS_DIRTY(dqp)) {
- struct xfs_buf *bp = NULL;
- int error;
-
- trace_xfs_dqreclaim_dirty(dqp);
-
- /* we have to drop the LRU lock to flush the dquot */
- spin_unlock(&lru->lock);
-
- error = xfs_qm_dqflush(dqp, &bp);
- if (error)
- goto out_unlock_dirty;
-
- xfs_buf_delwri_queue(bp, &isol->buffers);
- xfs_buf_relse(bp);
- goto out_unlock_dirty;
- }
+ ASSERT(!XFS_DQ_IS_DIRTY(dqp));
+ xfs_dquot_detach_buf(dqp);
xfs_dqfunlock(dqp);
/*
@@ -518,13 +533,7 @@ out_miss_unlock:
out_miss_busy:
trace_xfs_dqreclaim_busy(dqp);
XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
- return LRU_SKIP;
-
-out_unlock_dirty:
- trace_xfs_dqreclaim_busy(dqp);
- XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
- xfs_dqunlock(dqp);
- return LRU_RETRY;
+ return ret;
}
static unsigned long
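The reclaim hunks above stop flushing dquots from the shrinker: dirty or pinned dquots are reported as LRU_ROTATE so the list_lru walker moves them to the tail and retries later, while lock contention still yields LRU_SKIP. As a hedged illustration of that isolate-callback pattern in general (not the XFS function itself), a minimal callback could look like the sketch below; struct my_object and its busy flag are hypothetical, the lru_status values come from <linux/list_lru.h>, and the exact prototype is whatever list_lru_walk_cb is in the target kernel.

struct my_object {
	spinlock_t		lock;
	struct list_head	lru;
	bool			busy;	/* stand-in for "dirty or pinned" */
};

/* Hypothetical isolate callback: skip contended objects, rotate busy ones. */
static enum lru_status my_isolate(struct list_head *item,
		struct list_lru_one *lru, void *arg)
{
	struct my_object	*obj = container_of(item, struct my_object, lru);
	struct list_head	*dispose = arg;

	if (!spin_trylock(&obj->lock))
		return LRU_SKIP;	/* contended: look again on the next pass */

	if (obj->busy) {
		spin_unlock(&obj->lock);
		return LRU_ROTATE;	/* give it time to become reclaimable */
	}

	/* safe to reclaim: hand it to the walker's dispose list */
	list_lru_isolate_move(lru, &obj->lru, dispose);
	spin_unlock(&obj->lock);
	return LRU_REMOVED;
}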
@@ -646,17 +655,13 @@ xfs_qm_init_timelimits(
static int
xfs_qm_load_metadir_qinos(
struct xfs_mount *mp,
- struct xfs_quotainfo *qi,
- struct xfs_inode **dpp)
+ struct xfs_quotainfo *qi)
{
struct xfs_trans *tp;
int error;
- error = xfs_trans_alloc_empty(mp, &tp);
- if (error)
- return error;
-
- error = xfs_dqinode_load_parent(tp, dpp);
+ tp = xfs_trans_alloc_empty(mp);
+ error = xfs_dqinode_load_parent(tp, &qi->qi_dirip);
if (error == -ENOENT) {
/* no quota directory, but we'll create one later */
error = 0;
@@ -666,21 +671,21 @@ xfs_qm_load_metadir_qinos(
goto out_trans;
if (XFS_IS_UQUOTA_ON(mp)) {
- error = xfs_dqinode_load(tp, *dpp, XFS_DQTYPE_USER,
+ error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_USER,
&qi->qi_uquotaip);
if (error && error != -ENOENT)
goto out_trans;
}
if (XFS_IS_GQUOTA_ON(mp)) {
- error = xfs_dqinode_load(tp, *dpp, XFS_DQTYPE_GROUP,
+ error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_GROUP,
&qi->qi_gquotaip);
if (error && error != -ENOENT)
goto out_trans;
}
if (XFS_IS_PQUOTA_ON(mp)) {
- error = xfs_dqinode_load(tp, *dpp, XFS_DQTYPE_PROJ,
+ error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_PROJ,
&qi->qi_pquotaip);
if (error && error != -ENOENT)
goto out_trans;
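These hunks also pick up the newer xfs_trans_alloc_empty() calling convention: as the removed error check shows, the helper now returns the transaction directly rather than filling an out parameter. A minimal sketch of the call-site difference, assuming only what the hunks show:

	struct xfs_trans	*tp;

	/*
	 * Old convention (removed above): error = xfs_trans_alloc_empty(mp, &tp)
	 * followed by an error check. New convention: the empty transaction is
	 * handed back directly, so there is no failure case for the caller.
	 */
	tp = xfs_trans_alloc_empty(mp);
	error = xfs_dqinode_load_parent(tp, &qi->qi_dirip);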
@@ -696,34 +701,40 @@ out_trans:
STATIC int
xfs_qm_create_metadir_qinos(
struct xfs_mount *mp,
- struct xfs_quotainfo *qi,
- struct xfs_inode **dpp)
+ struct xfs_quotainfo *qi)
{
int error;
- if (!*dpp) {
- error = xfs_dqinode_mkdir_parent(mp, dpp);
+ if (!qi->qi_dirip) {
+ error = xfs_dqinode_mkdir_parent(mp, &qi->qi_dirip);
if (error && error != -EEXIST)
return error;
+ /*
+ * If the /quotas dirent points to an inode that isn't
+ * loadable, qi_dirip will be NULL but mkdir_parent will return
+ * -EEXIST. In this case the metadir is corrupt, so bail out.
+ */
+ if (XFS_IS_CORRUPT(mp, qi->qi_dirip == NULL))
+ return -EFSCORRUPTED;
}
if (XFS_IS_UQUOTA_ON(mp) && !qi->qi_uquotaip) {
- error = xfs_dqinode_metadir_create(*dpp, XFS_DQTYPE_USER,
- &qi->qi_uquotaip);
+ error = xfs_dqinode_metadir_create(qi->qi_dirip,
+ XFS_DQTYPE_USER, &qi->qi_uquotaip);
if (error)
return error;
}
if (XFS_IS_GQUOTA_ON(mp) && !qi->qi_gquotaip) {
- error = xfs_dqinode_metadir_create(*dpp, XFS_DQTYPE_GROUP,
- &qi->qi_gquotaip);
+ error = xfs_dqinode_metadir_create(qi->qi_dirip,
+ XFS_DQTYPE_GROUP, &qi->qi_gquotaip);
if (error)
return error;
}
if (XFS_IS_PQUOTA_ON(mp) && !qi->qi_pquotaip) {
- error = xfs_dqinode_metadir_create(*dpp, XFS_DQTYPE_PROJ,
- &qi->qi_pquotaip);
+ error = xfs_dqinode_metadir_create(qi->qi_dirip,
+ XFS_DQTYPE_PROJ, &qi->qi_pquotaip);
if (error)
return error;
}
@@ -768,7 +779,6 @@ xfs_qm_init_metadir_qinos(
struct xfs_mount *mp)
{
struct xfs_quotainfo *qi = mp->m_quotainfo;
- struct xfs_inode *dp = NULL;
int error;
if (!xfs_has_quota(mp)) {
@@ -777,20 +787,22 @@ xfs_qm_init_metadir_qinos(
return error;
}
- error = xfs_qm_load_metadir_qinos(mp, qi, &dp);
+ error = xfs_qm_load_metadir_qinos(mp, qi);
if (error)
goto out_err;
- error = xfs_qm_create_metadir_qinos(mp, qi, &dp);
+ error = xfs_qm_create_metadir_qinos(mp, qi);
if (error)
goto out_err;
- xfs_irele(dp);
+ /* The only user of the quota dir inode is online fsck */
+#if !IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB)
+ xfs_irele(qi->qi_dirip);
+ qi->qi_dirip = NULL;
+#endif
return 0;
out_err:
xfs_qm_destroy_quotainos(mp->m_quotainfo);
- if (dp)
- xfs_irele(dp);
return error;
}
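Taken together, the metadir hunks move the quota directory inode from a local variable into struct xfs_quotainfo as qi_dirip. A hedged summary of its lifecycle, reconstructed only from the hunks above:

/*
 * qi_dirip lifecycle as introduced by these hunks (sketch, not kernel text):
 *
 *   xfs_qm_load_metadir_qinos()   - xfs_dqinode_load_parent() fills
 *                                   qi->qi_dirip if the directory exists
 *   xfs_qm_create_metadir_qinos() - xfs_dqinode_mkdir_parent() creates it
 *                                   otherwise; a NULL qi_dirip after an
 *                                   -EEXIST return means the metadir is
 *                                   corrupt (-EFSCORRUPTED)
 *   xfs_qm_init_metadir_qinos()   - releases qi_dirip immediately unless
 *                                   CONFIG_XFS_ONLINE_SCRUB is enabled,
 *                                   since online fsck is its only user
 *   xfs_qm_destroy_quotainos()    - xfs_irele()s any qi_dirip still held
 */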
@@ -1304,6 +1316,10 @@ xfs_qm_quotacheck_dqadjust(
return error;
}
+ error = xfs_dquot_attach_buf(NULL, dqp);
+ if (error)
+ return error;
+
trace_xfs_dqadjust(dqp);
/*
@@ -1446,7 +1462,6 @@ xfs_qm_flush_one(
struct xfs_dquot *dqp,
void *data)
{
- struct xfs_mount *mp = dqp->q_mount;
struct list_head *buffer_list = data;
struct xfs_buf *bp = NULL;
int error = 0;
@@ -1457,40 +1472,20 @@ xfs_qm_flush_one(
if (!XFS_DQ_IS_DIRTY(dqp))
goto out_unlock;
- /*
- * The only way the dquot is already flush locked by the time quotacheck
- * gets here is if reclaim flushed it before the dqadjust walk dirtied
- * it for the final time. Quotacheck collects all dquot bufs in the
- * local delwri queue before dquots are dirtied, so reclaim can't have
- * possibly queued it for I/O. The only way out is to push the buffer to
- * cycle the flush lock.
- */
- if (!xfs_dqflock_nowait(dqp)) {
- /* buf is pinned in-core by delwri list */
- error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
- mp->m_quotainfo->qi_dqchunklen, 0, &bp);
- if (error)
- goto out_unlock;
-
- if (!(bp->b_flags & _XBF_DELWRI_Q)) {
- error = -EAGAIN;
- xfs_buf_relse(bp);
- goto out_unlock;
- }
- xfs_buf_unlock(bp);
-
- xfs_buf_delwri_pushbuf(bp, buffer_list);
- xfs_buf_rele(bp);
-
- error = -EAGAIN;
- goto out_unlock;
- }
+ xfs_qm_dqunpin_wait(dqp);
+ xfs_dqflock(dqp);
- error = xfs_qm_dqflush(dqp, &bp);
+ error = xfs_dquot_use_attached_buf(dqp, &bp);
if (error)
goto out_unlock;
+ if (!bp) {
+ error = -EFSCORRUPTED;
+ goto out_unlock;
+ }
- xfs_buf_delwri_queue(bp, buffer_list);
+ error = xfs_qm_dqflush(dqp, bp);
+ if (!error)
+ xfs_buf_delwri_queue(bp, buffer_list);
xfs_buf_relse(bp);
out_unlock:
xfs_dqunlock(dqp);
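After the hunk above, quotacheck's per-dquot flush no longer needs to locate and push a delwri buffer just to cycle the flush lock: it waits for the dquot to be unpinned, takes the flush lock, and flushes into the buffer attached earlier by the xfs_dquot_attach_buf() call added to xfs_qm_quotacheck_dqadjust(). A condensed, non-authoritative sketch of the resulting path, with the surrounding dquot lock/unlock from the walk omitted:

/* Sketch only: condensed xfs_qm_flush_one() flow after this change. */
static int xfs_qm_flush_one_sketch(struct xfs_dquot *dqp,
		struct list_head *buffer_list)
{
	struct xfs_buf	*bp = NULL;
	int		error;

	if (!XFS_DQ_IS_DIRTY(dqp))
		return 0;

	xfs_qm_dqunpin_wait(dqp);	/* blocking wait; this is quotacheck, not reclaim */
	xfs_dqflock(dqp);

	error = xfs_dquot_use_attached_buf(dqp, &bp);
	if (error)
		return error;
	if (!bp)			/* a dirty dquot should always have its buffer attached */
		return -EFSCORRUPTED;

	error = xfs_qm_dqflush(dqp, bp);
	if (!error)
		xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
	return error;
}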
@@ -1665,7 +1660,8 @@ xfs_qm_mount_quotas(
* immediately. We only support rtquota if rtgroups are enabled to
* avoid problems with older kernels.
*/
- if (mp->m_sb.sb_rextents && !xfs_has_rtgroups(mp)) {
+ if (mp->m_sb.sb_rextents &&
+ (!xfs_has_rtgroups(mp) || xfs_has_zoned(mp))) {
xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
mp->m_qflags = 0;
goto write_changes;
@@ -1756,10 +1752,7 @@ xfs_qm_qino_load(
struct xfs_inode *dp = NULL;
int error;
- error = xfs_trans_alloc_empty(mp, &tp);
- if (error)
- return error;
-
+ tp = xfs_trans_alloc_empty(mp);
if (xfs_has_metadir(mp)) {
error = xfs_dqinode_load_parent(tp, &dp);
if (error)