diff options
Diffstat (limited to 'fs/gfs2/quota.c')
-rw-r--r-- | fs/gfs2/quota.c | 433 |
1 file changed, 235 insertions, 198 deletions
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index c537e1d02cf3..2e6bc77f4f81 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -193,13 +193,26 @@ static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink, return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc)); } -struct shrinker gfs2_qd_shrinker = { - .count_objects = gfs2_qd_shrink_count, - .scan_objects = gfs2_qd_shrink_scan, - .seeks = DEFAULT_SEEKS, - .flags = SHRINKER_NUMA_AWARE, -}; +static struct shrinker *gfs2_qd_shrinker; + +int __init gfs2_qd_shrinker_init(void) +{ + gfs2_qd_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "gfs2-qd"); + if (!gfs2_qd_shrinker) + return -ENOMEM; + + gfs2_qd_shrinker->count_objects = gfs2_qd_shrink_count; + gfs2_qd_shrinker->scan_objects = gfs2_qd_shrink_scan; + shrinker_register(gfs2_qd_shrinker); + + return 0; +} + +void gfs2_qd_shrinker_exit(void) +{ + shrinker_free(gfs2_qd_shrinker); +} static u64 qd2index(struct gfs2_quota_data *qd) { @@ -255,7 +268,7 @@ static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash, if (qd->qd_sbd != sdp) continue; if (lockref_get_not_dead(&qd->qd_lockref)) { - list_lru_del(&gfs2_qd_lru, &qd->qd_lru); + list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru); return qd; } } @@ -303,11 +316,11 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid, } -static void qd_hold(struct gfs2_quota_data *qd) +static void __qd_hold(struct gfs2_quota_data *qd) { struct gfs2_sbd *sdp = qd->qd_sbd; - gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref)); - lockref_get(&qd->qd_lockref); + gfs2_assert(sdp, qd->qd_lockref.count > 0); + qd->qd_lockref.count++; } static void qd_put(struct gfs2_quota_data *qd) @@ -328,7 +341,7 @@ static void qd_put(struct gfs2_quota_data *qd) } qd->qd_lockref.count = 0; - list_lru_add(&gfs2_qd_lru, &qd->qd_lru); + list_lru_add_obj(&gfs2_qd_lru, &qd->qd_lru); spin_unlock(&qd->qd_lockref.lock); } @@ -384,16 +397,17 @@ static int bh_get(struct gfs2_quota_data *qd) struct inode *inode = 
sdp->sd_qc_inode; struct gfs2_inode *ip = GFS2_I(inode); unsigned int block, offset; - struct buffer_head *bh; + struct buffer_head *bh = NULL; struct iomap iomap = { }; int error; - mutex_lock(&sdp->sd_quota_mutex); - - if (qd->qd_bh_count++) { - mutex_unlock(&sdp->sd_quota_mutex); + spin_lock(&qd->qd_lockref.lock); + if (qd->qd_bh_count) { + qd->qd_bh_count++; + spin_unlock(&qd->qd_lockref.lock); return 0; } + spin_unlock(&qd->qd_lockref.lock); block = qd->qd_slot / sdp->sd_qc_per_block; offset = qd->qd_slot % sdp->sd_qc_per_block; @@ -402,66 +416,76 @@ static int bh_get(struct gfs2_quota_data *qd) (loff_t)block << inode->i_blkbits, i_blocksize(inode), &iomap); if (error) - goto fail; + return error; error = -ENOENT; if (iomap.type != IOMAP_MAPPED) - goto fail; + return error; error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits, DIO_WAIT, 0, &bh); if (error) - goto fail; + return error; error = -EIO; if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) - goto fail_brelse; - - qd->qd_bh = bh; - qd->qd_bh_qc = (struct gfs2_quota_change *) - (bh->b_data + sizeof(struct gfs2_meta_header) + - offset * sizeof(struct gfs2_quota_change)); - - mutex_unlock(&sdp->sd_quota_mutex); + goto out; - return 0; + spin_lock(&qd->qd_lockref.lock); + if (qd->qd_bh == NULL) { + qd->qd_bh = bh; + qd->qd_bh_qc = (struct gfs2_quota_change *) + (bh->b_data + sizeof(struct gfs2_meta_header) + + offset * sizeof(struct gfs2_quota_change)); + bh = NULL; + } + qd->qd_bh_count++; + spin_unlock(&qd->qd_lockref.lock); + error = 0; -fail_brelse: +out: brelse(bh); -fail: - qd->qd_bh_count--; - mutex_unlock(&sdp->sd_quota_mutex); return error; } static void bh_put(struct gfs2_quota_data *qd) { struct gfs2_sbd *sdp = qd->qd_sbd; + struct buffer_head *bh = NULL; - mutex_lock(&sdp->sd_quota_mutex); + spin_lock(&qd->qd_lockref.lock); gfs2_assert(sdp, qd->qd_bh_count); if (!--qd->qd_bh_count) { - brelse(qd->qd_bh); + bh = qd->qd_bh; qd->qd_bh = NULL; qd->qd_bh_qc = NULL; } - 
mutex_unlock(&sdp->sd_quota_mutex); + spin_unlock(&qd->qd_lockref.lock); + brelse(bh); } static bool qd_grab_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd, u64 sync_gen) { + bool ret = false; + + spin_lock(&qd->qd_lockref.lock); if (test_bit(QDF_LOCKED, &qd->qd_flags) || !test_bit(QDF_CHANGE, &qd->qd_flags) || qd->qd_sync_gen >= sync_gen) - return false; + goto out; - if (!lockref_get_not_dead(&qd->qd_lockref)) - return false; + if (__lockref_is_dead(&qd->qd_lockref)) + goto out; + qd->qd_lockref.count++; list_move_tail(&qd->qd_list, &sdp->sd_quota_list); set_bit(QDF_LOCKED, &qd->qd_flags); qd->qd_change_sync = qd->qd_change; slot_hold(qd); - return true; + ret = true; + +out: + spin_unlock(&qd->qd_lockref.lock); + return ret; } static void qd_ungrab_sync(struct gfs2_quota_data *qd) @@ -471,40 +495,6 @@ static void qd_ungrab_sync(struct gfs2_quota_data *qd) qd_put(qd); } -static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp) -{ - struct gfs2_quota_data *qd = NULL, *iter; - int error; - - *qdp = NULL; - - if (sb_rdonly(sdp->sd_vfs)) - return 0; - - spin_lock(&qd_lock); - - list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) { - if (qd_grab_sync(sdp, iter, sdp->sd_quota_sync_gen)) { - qd = iter; - break; - } - } - - spin_unlock(&qd_lock); - - if (qd) { - error = bh_get(qd); - if (error) { - qd_ungrab_sync(qd); - return error; - } - } - - *qdp = qd; - - return 0; -} - static void qdsb_put(struct gfs2_quota_data *qd) { bh_put(qd); @@ -514,8 +504,10 @@ static void qdsb_put(struct gfs2_quota_data *qd) static void qd_unlock(struct gfs2_quota_data *qd) { + spin_lock(&qd->qd_lockref.lock); gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags)); clear_bit(QDF_LOCKED, &qd->qd_flags); + spin_unlock(&qd->qd_lockref.lock); qdsb_put(qd); } @@ -684,41 +676,52 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change) struct gfs2_sbd *sdp = qd->qd_sbd; struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); struct gfs2_quota_change *qc = 
qd->qd_bh_qc; + bool needs_put = false; s64 x; - mutex_lock(&sdp->sd_quota_mutex); gfs2_trans_add_meta(ip->i_gl, qd->qd_bh); - if (!test_bit(QDF_CHANGE, &qd->qd_flags)) { - qc->qc_change = 0; + /* + * The QDF_CHANGE flag indicates that the slot in the quota change file + * is used. Here, we use the value of qc->qc_change when the slot is + * used, and we assume a value of 0 otherwise. + */ + + spin_lock(&qd->qd_lockref.lock); + + x = 0; + if (test_bit(QDF_CHANGE, &qd->qd_flags)) + x = be64_to_cpu(qc->qc_change); + x += change; + qd->qd_change += change; + + if (!x && test_bit(QDF_CHANGE, &qd->qd_flags)) { + /* The slot in the quota change file becomes unused. */ + clear_bit(QDF_CHANGE, &qd->qd_flags); + qc->qc_flags = 0; + qc->qc_id = 0; + needs_put = true; + } else if (x && !test_bit(QDF_CHANGE, &qd->qd_flags)) { + /* The slot in the quota change file becomes used. */ + set_bit(QDF_CHANGE, &qd->qd_flags); + __qd_hold(qd); + slot_hold(qd); + qc->qc_flags = 0; if (qd->qd_id.type == USRQUOTA) qc->qc_flags = cpu_to_be32(GFS2_QCF_USER); qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id)); } - - x = be64_to_cpu(qc->qc_change) + change; qc->qc_change = cpu_to_be64(x); - spin_lock(&qd_lock); - qd->qd_change = x; - spin_unlock(&qd_lock); + spin_unlock(&qd->qd_lockref.lock); - if (!x) { - gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags)); - clear_bit(QDF_CHANGE, &qd->qd_flags); - qc->qc_flags = 0; - qc->qc_id = 0; + if (needs_put) { slot_put(qd); qd_put(qd); - } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) { - qd_hold(qd); - slot_hold(qd); } - if (change < 0) /* Reset quiet flag if we freed some blocks */ clear_bit(QDF_QMSG_QUIET, &qd->qd_flags); - mutex_unlock(&sdp->sd_quota_mutex); } static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index, @@ -727,7 +730,7 @@ static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index, struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); struct inode *inode = &ip->i_inode; 
struct address_space *mapping = inode->i_mapping; - struct page *page; + struct folio *folio; struct buffer_head *bh; u64 blk; unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0; @@ -736,15 +739,15 @@ static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index, blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift); boff = off % bsize; - page = grab_cache_page(mapping, index); - if (!page) - return -ENOMEM; - if (!page_has_buffers(page)) - create_empty_buffers(page, bsize, 0); + folio = filemap_grab_folio(mapping, index); + if (IS_ERR(folio)) + return PTR_ERR(folio); + bh = folio_buffers(folio); + if (!bh) + bh = create_empty_buffers(folio, bsize, 0); - bh = page_buffers(page); - for(;;) { - /* Find the beginning block within the page */ + for (;;) { + /* Find the beginning block within the folio */ if (pg_off >= ((bnum * bsize) + bsize)) { bh = bh->b_this_page; bnum++; @@ -757,9 +760,10 @@ static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index, goto unlock_out; /* If it's a newly allocated disk block, zero it */ if (buffer_new(bh)) - zero_user(page, bnum * bsize, bh->b_size); + folio_zero_range(folio, bnum * bsize, + bh->b_size); } - if (PageUptodate(page)) + if (folio_test_uptodate(folio)) set_buffer_uptodate(bh); if (bh_read(bh, REQ_META | REQ_PRIO) < 0) goto unlock_out; @@ -775,17 +779,17 @@ static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index, break; } - /* Write to the page, now that we have setup the buffer(s) */ - memcpy_to_page(page, off, buf, bytes); - flush_dcache_page(page); - unlock_page(page); - put_page(page); + /* Write to the folio, now that we have setup the buffer(s) */ + memcpy_to_folio(folio, off, buf, bytes); + flush_dcache_folio(folio); + folio_unlock(folio); + folio_put(folio); return 0; unlock_out: - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); return -EIO; } @@ -856,6 +860,7 @@ static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc, 
be64_add_cpu(&q.qu_value, change); if (((s64)be64_to_cpu(q.qu_value)) < 0) q.qu_value = 0; /* Never go negative on quota usage */ + spin_lock(&qd->qd_lockref.lock); qd->qd_qb.qb_value = q.qu_value; if (fdq) { if (fdq->d_fieldmask & QC_SPC_SOFT) { @@ -871,13 +876,14 @@ static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc, qd->qd_qb.qb_value = q.qu_value; } } + spin_unlock(&qd->qd_lockref.lock); err = gfs2_write_disk_quota(sdp, &q, loc); if (!err) { size = loc + sizeof(struct gfs2_quota); if (size > inode->i_size) i_size_write(inode, size); - inode->i_mtime = inode_set_ctime_current(inode); + inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); mark_inode_dirty(inode); set_bit(QDF_REFRESH, &qd->qd_flags); } @@ -885,11 +891,12 @@ static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc, return err; } -static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) +static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda, + u64 sync_gen) { struct gfs2_sbd *sdp = (*qda)->qd_sbd; struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); - struct gfs2_alloc_parms ap = { .aflags = 0, }; + struct gfs2_alloc_parms ap = {}; unsigned int data_blocks, ind_blocks; struct gfs2_holder *ghs, i_gh; unsigned int qx, x; @@ -976,8 +983,13 @@ out_dq: gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC); if (!error) { - for (x = 0; x < num_qd; x++) - qda[x]->qd_sync_gen = sdp->sd_quota_sync_gen; + for (x = 0; x < num_qd; x++) { + qd = qda[x]; + spin_lock(&qd->qd_lockref.lock); + if (qd->qd_sync_gen < sync_gen) + qd->qd_sync_gen = sync_gen; + spin_unlock(&qd->qd_lockref.lock); + } } return error; } @@ -1002,7 +1014,9 @@ static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd) qlvb->qb_limit = q.qu_limit; qlvb->qb_warn = q.qu_warn; qlvb->qb_value = q.qu_value; + spin_lock(&qd->qd_lockref.lock); qd->qd_qb = *qlvb; + spin_unlock(&qd->qd_lockref.lock); return 0; } @@ -1024,7 +1038,9 @@ restart: if 
(test_and_clear_bit(QDF_REFRESH, &qd->qd_flags)) force_refresh = FORCE; + spin_lock(&qd->qd_lockref.lock); qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; + spin_unlock(&qd->qd_lockref.lock); if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) { gfs2_glock_dq_uninit(q_gh); @@ -1063,8 +1079,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) u32 x; int error; - if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON && - sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) + if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) return 0; error = gfs2_quota_hold(ip, uid, gid); @@ -1096,35 +1111,36 @@ static bool need_sync(struct gfs2_quota_data *qd) { struct gfs2_sbd *sdp = qd->qd_sbd; struct gfs2_tune *gt = &sdp->sd_tune; - s64 value; + s64 value, change, limit; unsigned int num, den; + int ret = false; + spin_lock(&qd->qd_lockref.lock); if (!qd->qd_qb.qb_limit) - return false; + goto out; - spin_lock(&qd_lock); - value = qd->qd_change; - spin_unlock(&qd_lock); + change = qd->qd_change; + if (change <= 0) + goto out; + value = (s64)be64_to_cpu(qd->qd_qb.qb_value); + limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit); + if (value >= limit) + goto out; spin_lock(>->gt_spin); num = gt->gt_quota_scale_num; den = gt->gt_quota_scale_den; spin_unlock(>->gt_spin); - if (value <= 0) - return false; - else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >= - (s64)be64_to_cpu(qd->qd_qb.qb_limit)) - return false; - else { - value *= gfs2_jindex_size(sdp) * num; - value = div_s64(value, den); - value += (s64)be64_to_cpu(qd->qd_qb.qb_value); - if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit)) - return false; - } + change *= gfs2_jindex_size(sdp) * num; + change = div_s64(change, den); + if (value + change < limit) + goto out; - return true; + ret = true; +out: + spin_unlock(&qd->qd_lockref.lock); + return ret; } void gfs2_quota_unlock(struct gfs2_inode *ip) @@ -1167,7 +1183,9 @@ void gfs2_quota_unlock(struct gfs2_inode *ip) } if (count) { - do_sync(count, qda); + u64 
sync_gen = READ_ONCE(sdp->sd_quota_sync_gen); + + do_sync(count, qda, sync_gen); for (x = 0; x < count; x++) qd_unlock(qda[x]); } @@ -1177,17 +1195,16 @@ void gfs2_quota_unlock(struct gfs2_inode *ip) #define MAX_LINE 256 -static int print_message(struct gfs2_quota_data *qd, char *type) +static void print_message(struct gfs2_quota_data *qd, char *type) { struct gfs2_sbd *sdp = qd->qd_sbd; - if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) + if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) { fs_info(sdp, "quota %s for %s %u\n", type, (qd->qd_id.type == USRQUOTA) ? "user" : "group", from_kqid(&init_user_ns, qd->qd_id)); - - return 0; + } } /** @@ -1227,12 +1244,12 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid, qid_eq(qd->qd_id, make_kqid_gid(gid)))) continue; + spin_lock(&qd->qd_lockref.lock); warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn); limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit); value = (s64)be64_to_cpu(qd->qd_qb.qb_value); - spin_lock(&qd_lock); value += qd->qd_change; - spin_unlock(&qd_lock); + spin_unlock(&qd->qd_lockref.lock); if (limit > 0 && (limit - value) < ap->allowed) ap->allowed = limit - value; @@ -1257,7 +1274,8 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid, * HZ)) { quota_send_warning(qd->qd_id, sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN); - error = print_message(qd, "warning"); + print_message(qd, "warning"); + error = 0; qd->qd_last_warn = jiffies; } } @@ -1271,8 +1289,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change, u32 x; struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - if ((sdp->sd_args.ar_quota != GFS2_QUOTA_ON && - sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) || + if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF || gfs2_assert_warn(sdp, change)) return; if (ip->i_diskflags & GFS2_DIF_SYSTEM) @@ -1291,34 +1308,15 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change, } } -static bool qd_changed(struct gfs2_sbd *sdp) -{ - struct gfs2_quota_data *qd; - bool changed = false; - - 
spin_lock(&qd_lock); - list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) { - if (test_bit(QDF_LOCKED, &qd->qd_flags) || - !test_bit(QDF_CHANGE, &qd->qd_flags)) - continue; - - changed = true; - break; - } - spin_unlock(&qd_lock); - return changed; -} - int gfs2_quota_sync(struct super_block *sb, int type) { struct gfs2_sbd *sdp = sb->s_fs_info; struct gfs2_quota_data **qda; unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder); - unsigned int num_qd; - unsigned int x; + u64 sync_gen; int error = 0; - if (!qd_changed(sdp)) + if (sb_rdonly(sdp->sd_vfs)) return 0; qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL); @@ -1326,27 +1324,44 @@ int gfs2_quota_sync(struct super_block *sb, int type) return -ENOMEM; mutex_lock(&sdp->sd_quota_sync_mutex); - sdp->sd_quota_sync_gen++; + sync_gen = sdp->sd_quota_sync_gen + 1; do { - num_qd = 0; + struct gfs2_quota_data *iter; + unsigned int num_qd = 0; + unsigned int x; - for (;;) { - error = qd_fish(sdp, qda + num_qd); - if (error || !qda[num_qd]) - break; - if (++num_qd == max_qd) - break; + spin_lock(&qd_lock); + list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) { + if (qd_grab_sync(sdp, iter, sync_gen)) { + qda[num_qd++] = iter; + if (num_qd == max_qd) + break; + } } + spin_unlock(&qd_lock); + + if (!num_qd) + break; - if (num_qd) { + for (x = 0; x < num_qd; x++) { + error = bh_get(qda[x]); if (!error) - error = do_sync(num_qd, qda); + continue; + + while (x < num_qd) + qd_ungrab_sync(qda[--num_qd]); + break; + } - for (x = 0; x < num_qd; x++) - qd_unlock(qda[x]); + if (!error) { + WRITE_ONCE(sdp->sd_quota_sync_gen, sync_gen); + error = do_sync(num_qd, qda, sync_gen); } - } while (!error && num_qd == max_qd); + + for (x = 0; x < num_qd; x++) + qd_unlock(qda[x]); + } while (!error); mutex_unlock(&sdp->sd_quota_sync_mutex); kfree(qda); @@ -1381,6 +1396,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp) unsigned int found = 0; unsigned int hash; unsigned int bm_size; + struct buffer_head *bh; u64 
dblock; u32 extlen = 0; int error; @@ -1400,8 +1416,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp) return error; for (x = 0; x < blocks; x++) { - struct buffer_head *bh; - const struct gfs2_quota_change *qc; + struct gfs2_quota_change *qc; unsigned int y; if (!extlen) { @@ -1414,15 +1429,13 @@ int gfs2_quota_init(struct gfs2_sbd *sdp) bh = gfs2_meta_ra(ip->i_gl, dblock, extlen); if (!bh) goto fail; - if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) { - brelse(bh); - goto fail; - } + if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) + goto fail_brelse; - qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header)); + qc = (struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header)); for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots; y++, slot++) { - struct gfs2_quota_data *qd; + struct gfs2_quota_data *old_qd, *qd; s64 qc_change = be64_to_cpu(qc->qc_change); u32 qc_flags = be32_to_cpu(qc->qc_flags); enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ? 
@@ -1435,10 +1448,8 @@ int gfs2_quota_init(struct gfs2_sbd *sdp) hash = gfs2_qd_hash(sdp, qc_id); qd = qd_alloc(hash, sdp, qc_id); - if (qd == NULL) { - brelse(bh); - goto fail; - } + if (qd == NULL) + goto fail_brelse; set_bit(QDF_CHANGE, &qd->qd_flags); qd->qd_change = qc_change; @@ -1446,18 +1457,41 @@ int gfs2_quota_init(struct gfs2_sbd *sdp) qd->qd_slot_ref = 1; spin_lock(&qd_lock); + spin_lock_bucket(hash); + old_qd = gfs2_qd_search_bucket(hash, sdp, qc_id); + if (old_qd) { + fs_err(sdp, "Corruption found in quota_change%u" + "file: duplicate identifier in " + "slot %u\n", + sdp->sd_jdesc->jd_jid, slot); + + spin_unlock_bucket(hash); + spin_unlock(&qd_lock); + qd_put(old_qd); + + gfs2_glock_put(qd->qd_gl); + kmem_cache_free(gfs2_quotad_cachep, qd); + + /* zero out the duplicate slot */ + lock_buffer(bh); + memset(qc, 0, sizeof(*qc)); + mark_buffer_dirty(bh); + unlock_buffer(bh); + + continue; + } BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap)); list_add(&qd->qd_list, &sdp->sd_quota_list); atomic_inc(&sdp->sd_quota_count); - spin_unlock(&qd_lock); - - spin_lock_bucket(hash); hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]); spin_unlock_bucket(hash); + spin_unlock(&qd_lock); found++; } + if (buffer_dirty(bh)) + sync_dirty_buffer(bh); brelse(bh); dblock++; extlen--; @@ -1468,6 +1502,10 @@ int gfs2_quota_init(struct gfs2_sbd *sdp) return 0; +fail_brelse: + if (buffer_dirty(bh)) + sync_dirty_buffer(bh); + brelse(bh); fail: gfs2_quota_cleanup(sdp); return error; @@ -1492,7 +1530,7 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp) lockref_mark_dead(&qd->qd_lockref); spin_unlock(&qd->qd_lockref.lock); - list_lru_del(&gfs2_qd_lru, &qd->qd_lru); + list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru); list_add(&qd->qd_lru, &dispose); } spin_unlock(&qd_lock); @@ -1557,6 +1595,7 @@ int gfs2_quotad(void *data) unsigned long quotad_timeo = 0; unsigned long t = 0; + set_freezable(); while (!kthread_should_stop()) { if (gfs2_withdrawing_or_withdrawn(sdp)) break; @@ 
-1576,11 +1615,9 @@ int gfs2_quotad(void *data) quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, "ad_timeo, &tune->gt_quota_quantum); - try_to_freeze(); - t = min(quotad_timeo, statfs_timeo); - t = wait_event_interruptible_timeout(sdp->sd_quota_wait, + t = wait_event_freezable_timeout(sdp->sd_quota_wait, sdp->sd_statfs_force_sync || gfs2_withdrawing_or_withdrawn(sdp) || kthread_should_stop(), @@ -1730,7 +1767,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid, if (gfs2_is_stuffed(ip)) alloc_required = 1; if (alloc_required) { - struct gfs2_alloc_parms ap = { .aflags = 0, }; + struct gfs2_alloc_parms ap = {}; gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), &data_blocks, &ind_blocks); blocks = 1 + data_blocks + ind_blocks; |