/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a
 * quota-check program to be run after node crashes or anything like that.
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
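 *
 * For example, with "quota_scale" at its default of 1/1 on a cluster with
 * four journals, need_sync() below treats a local unsynced change of C
 * blocks as if every node were dirtying blocks at the same rate: a sync is
 * forced once synced_value + 4 * C reaches the limit.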
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not being constantly read.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>

#include "lm_interface.h"
#include "ops_address.h"

static u64 qd2offset(struct gfs2_quota_data *qd)
	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);
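	/* The quota file thus interleaves records by ID: user quotas occupy
	 * the even slots (2 * id) and group quotas the odd ones (2 * id + 1),
	 * each sizeof(struct gfs2_quota) bytes wide, so e.g. the group quota
	 * for ID 5 starts at byte offset 11 * sizeof(struct gfs2_quota). */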

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
	struct gfs2_quota_data *qd;

	qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);

	if (user)
		set_bit(QDF_USER, &qd->qd_flags);

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);

	error = gfs2_lvb_hold(qd->qd_gl);
	gfs2_glock_put(qd->qd_gl);
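	/* gfs2_lvb_hold() takes its own reference on the glock, so the
	 * creation reference from gfs2_glock_get() can be dropped right
	 * away; the LVB hold is what keeps the glock alive from here on. */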

static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		  struct gfs2_quota_data **qdp)
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;

	spin_lock(&sdp->sd_quota_spin);
	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (qd->qd_id == id &&
		    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
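			/* "!x == !user" collapses both sides to 0 or 1, so
			 * the bit test and the caller's flag compare as
			 * booleans whatever non-zero values they carry. */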
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);

	spin_unlock(&sdp->sd_quota_spin);

			gfs2_lvb_unhold(new_qd->qd_gl);

		error = qd_alloc(sdp, user, id, &new_qd);

static void qd_hold(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	spin_unlock(&sdp->sd_quota_spin);

static void qd_put(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);

	qd->qd_last_touched = jiffies;
	spin_unlock(&sdp->sd_quota_spin);

static int slot_get(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&sdp->sd_quota_spin);

	if (qd->qd_slot_count++) {
		spin_unlock(&sdp->sd_quota_spin);

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];

	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))

	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;
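	/* Each bitmap chunk is one PAGE_SIZE allocation and so tracks
	 * 8 * PAGE_SIZE slots; the slot number is the chunk's base plus
	 * eight slots per byte scanned plus the free bit found. */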
	if (qd->qd_slot >= sdp->sd_quota_slots)

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&sdp->sd_quota_spin);

	spin_unlock(&sdp->sd_quota_spin);

static void slot_hold(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	spin_unlock(&sdp->sd_quota_spin);

static void slot_put(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);

	spin_unlock(&sdp->sd_quota_spin);

static int bh_get(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	error = gfs2_block_map(&ip->i_inode, block, &new, &dblock, &boundary);

	error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT, &bh);

	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))

	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));
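	/* Each quota-change block packs sd_qc_per_block entries after its
	 * metadata header; "offset" selects this qd's entry within the
	 * block mapped above. */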
	mutex_unlock(&sdp->sd_quota_mutex);

	mutex_unlock(&sdp->sd_quota_mutex);

static void bh_put(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {

	mutex_unlock(&sdp->sd_quota_mutex);

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
	struct gfs2_quota_data *qd = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)

	spin_lock(&sdp->sd_quota_spin);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
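		/* Requeue at the tail so successive qd_fish() calls walk
		 * the dirty IDs round-robin instead of re-picking the same
		 * entry every time. */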
		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, qd->qd_count);

		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);

	spin_unlock(&sdp->sd_quota_spin);

		gfs2_assert_warn(sdp, qd->qd_change_sync);

			clear_bit(QDF_LOCKED, &qd->qd_flags);

static int qd_trylock(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)

	spin_lock(&sdp->sd_quota_spin);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&sdp->sd_quota_spin);

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, qd->qd_count);

	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);

	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, qd->qd_change_sync);

		clear_bit(QDF_LOCKED, &qd->qd_flags);

static void qd_unlock(struct gfs2_quota_data *qd)
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		    struct gfs2_quota_data **qdp)
	error = qd_get(sdp, user, id, create, qdp);

	error = slot_get(*qdp);

	error = bh_get(*qdp);
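	/* A qdsb reference stacks three holds: the quota data itself, its
	 * quota-change slot, and the buffer backing that slot; the error
	 * paths unwind them in the reverse order. */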

static void qdsb_put(struct gfs2_quota_data *qd)

int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)

	error = qdsb_get(sdp, QUOTA_USER, ip->i_di.di_uid, CREATE, qd);

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_di.di_gid, CREATE, qd);
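	/* Beyond the inode's own uid/gid, an ID-changing operation such as
	 * chown can charge one more user and one more group, so up to four
	 * quota structures may be attached to a single transaction. */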
	if (uid != NO_QUOTA_CHANGE && uid != ip->i_di.di_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_di.di_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);

	gfs2_quota_unhold(ip);

void gfs2_quota_unhold(struct gfs2_inode *ip)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);

static int sort_qd(const void *a, const void *b)
	struct gfs2_quota_data *qd_a = *(struct gfs2_quota_data **)a;
	struct gfs2_quota_data *qd_b = *(struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))

	if (qd_a->qd_id < qd_b->qd_id)

	else if (qd_a->qd_id > qd_b->qd_id)
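	/* The resulting order is all user quotas before all group quotas,
	 * ascending by ID within each type.  Sorting gives every node the
	 * same glock acquisition order, which keeps the multi-glock paths
	 * in do_sync() and gfs2_quota_lock() deadlock-free. */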

static void do_qc(struct gfs2_quota_data *qd, s64 change)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);

	x = qc->qc_change;
	x = be64_to_cpu(x) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&sdp->sd_quota_spin);

	spin_unlock(&sdp->sd_quota_spin);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {

	mutex_unlock(&sdp->sd_quota_mutex);

/*
 * This function was mostly borrowed from gfs2_block_truncate_page, which was
 * in turn mostly borrowed from ext3.
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd)
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;

	page = grab_cache_page(mapping, index);

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;

	if (!buffer_mapped(bh)) {
		gfs2_get_block(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);
		if (!buffer_uptodate(bh))

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = (__be64 *)(kaddr + offset);
	value = (s64)be64_to_cpu(*ptr) + change;
	*ptr = cpu_to_be64(value);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);

	qd->qd_qb.qb_limit = cpu_to_be64(q.qu_limit);
	qd->qd_qb.qb_warn = cpu_to_be64(q.qu_warn);

	qd->qd_qb.qb_value = cpu_to_be64(value);

	page_cache_release(page);
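	/* The adjusted value is journaled through the quota file's page
	 * cache (via gfs2_trans_add_bh() above) and mirrored into qd_qb,
	 * the in-core copy of the glock LVB, so other holders of the glock
	 * can pick it up without re-reading the quota file. */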

static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	struct gfs2_quota_data *qd;
	unsigned int nalloc = 0;
	struct gfs2_alloc *al = NULL;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl,
					   LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),

	al = gfs2_alloc_get(ip);

	al->al_requested = nalloc * (data_blocks + ind_blocks);

	error = gfs2_inplace_reserve(ip);

	error = gfs2_trans_begin(sdp,
				 al->al_rgd->rd_ri.ri_length +
				 num_qd * data_blocks +
				 nalloc * ind_blocks +
				 RES_DINODE + num_qd +

	error = gfs2_trans_begin(sdp,
				 num_qd * data_blocks +
				 RES_DINODE + num_qd, 0);
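	/* The reservation covers one block per bitmap block of the target
	 * resource group (rd_ri.ri_length, only when allocating), the quota
	 * file's data and indirect blocks being written, the quota dinode,
	 * and one quota-change block per ID for the do_qc() calls below. */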
	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
					  (struct gfs2_quota_data *)

		do_qc(qd, -qd->qd_change_sync);
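		/* The local change has just been folded into the global
		 * quota file, so back the same amount out of this node's
		 * quota-change entry, zeroing it. */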
	gfs2_inplace_release(ip);

	gfs2_glock_dq_uninit(&i_gh);

	gfs2_glock_dq_uninit(&ghs[qx]);

	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	char buf[sizeof(struct gfs2_quota)];
	struct file_ra_state ra_state;
	struct gfs2_quota_lvb *qlvb;

	file_ra_state_init(&ra_state, sdp->sd_quota_inode->i_mapping);

	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
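	/* The glock's Lock Value Block carries a cached copy of this ID's
	 * limits and value, so a shared lock normally suffices and the
	 * quota file itself is not read (see the comment at the top of
	 * this file). */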
	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl,
					   LM_ST_EXCLUSIVE, GL_NOCACHE,
					   q_gh);

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);

		memset(buf, 0, sizeof(struct gfs2_quota));

		error = gfs2_internal_read(ip, &ra_state, buf,
					   &pos, sizeof(struct gfs2_quota));

		gfs2_glock_dq_uninit(&i_gh);

		gfs2_quota_in(&q, buf);
		qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
		qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
		qlvb->qb_limit = cpu_to_be64(q.qu_limit);
		qlvb->qb_warn = cpu_to_be64(q.qu_warn);
		qlvb->qb_value = cpu_to_be64(q.qu_value);

		if (gfs2_glock_is_blocking(qd->qd_gl)) {
			gfs2_glock_dq_uninit(q_gh);

	gfs2_glock_dq_uninit(&i_gh);

	gfs2_glock_dq_uninit(q_gh);

int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;

	error = gfs2_quota_hold(ip, uid, gid);

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);

	set_bit(GIF_QD_LOCKED, &ip->i_flags);

	gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
	gfs2_quota_unhold(ip);

static int need_sync(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	unsigned int num, den;

	if (!qd->qd_qb.qb_limit)

	spin_lock(&sdp->sd_quota_spin);
	value = qd->qd_change;
	spin_unlock(&sdp->sd_quota_spin);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))

		value *= gfs2_jindex_size(sdp) * num;
		do_div(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
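
/* A worked example of the scaling heuristic above: on a cluster with four
 * journals and quota_scale = 1 (num/den = 1/1), an ID with a synced value
 * of 900 blocks, a limit of 1000 and a local unsynced change of +30 gives
 * 900 + 30 * 4 * 1/1 = 1020 >= 1000, so a sync is triggered; a change of
 * only +20 estimates 980 and the change keeps sitting locally. */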

void gfs2_quota_unlock(struct gfs2_inode *ip)
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;

		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))

	for (x = 0; x < count; x++)

	gfs2_quota_unhold(ip);

static int print_message(struct gfs2_quota_data *qd, char *type)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qd;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)

	for (x = 0; x < al->al_qd_num; x++) {
		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&sdp->sd_quota_spin);
		value += qd->qd_change;
		spin_unlock(&sdp->sd_quota_spin);
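		/* The usage being enforced is the cluster-wide value cached
		 * in the LVB plus this node's not-yet-synced local change. */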
		if (be64_to_cpu(qd->qd_qb.qb_limit) &&
		    (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");

		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period) * HZ)) {
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int found = 0;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))

	if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)

	for (x = 0; x < al->al_qd_num; x++) {
		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
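
/* A sketch of how the pieces above fit together for an allocating
 * transaction (the real call sites live in the allocation paths):
 *
 *	gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
 *	gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
 *	...allocate the blocks inside a transaction...
 *	gfs2_quota_change(ip, +nblocks, ip->i_di.di_uid, ip->i_di.di_gid);
 *	gfs2_quota_unlock(ip);
 *
 * where "nblocks" stands for the number of blocks the transaction
 * allocated. */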

int gfs2_quota_sync(struct gfs2_sbd *sdp)
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);

		error = qd_fish(sdp, qda + num_qd);
		if (error || !qda[num_qd])
		if (++num_qd == max_qd)

			error = do_sync(num_qd, qda);

			for (x = 0; x < num_qd; x++)
				qda[x]->qd_sync_gen = sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)

	} while (!error && num_qd == max_qd);

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;

	error = qd_get(sdp, user, id, CREATE, &qd);

	error = do_glock(qd, FORCE, &q_gh);

	gfs2_glock_dq_uninit(&q_gh);

int gfs2_quota_read(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota *q)
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;

	if (((user) ? (id != current->fsuid) : (!in_group_p(id))) &&
	    !capable(CAP_SYS_ADMIN))

	error = qd_get(sdp, user, id, CREATE, &qd);

	error = do_glock(qd, NO_FORCE, &q_gh);

	memset(q, 0, sizeof(struct gfs2_quota));
	q->qu_limit = be64_to_cpu(qd->qd_qb.qb_limit);
	q->qu_warn = be64_to_cpu(qd->qd_qb.qb_warn);
	q->qu_value = be64_to_cpu(qd->qd_qb.qb_value);

	spin_lock(&sdp->sd_quota_spin);
	q->qu_value += qd->qd_change;
	spin_unlock(&sdp->sd_quota_spin);

	gfs2_glock_dq_uninit(&q_gh);

int gfs2_quota_init(struct gfs2_sbd *sdp)
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;

	if (!ip->i_di.di_size ||
	    ip->i_di.di_size > (64 << 20) ||
	    ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
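	/* One bitmap chunk is a PAGE_SIZE allocation tracking one bit per
	 * slot, so on 4 KiB pages each chunk covers 8 * 4096 = 32768
	 * quota-change slots; the chunk count is just the slot count
	 * rounded up to that granularity. */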
	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_KERNEL);
	if (!sdp->sd_quota_bitmap)

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!sdp->sd_quota_bitmap[x])

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;

		error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);

		gfs2_meta_ra(ip->i_gl, dblock, extlen);
		error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT,
				       &bh);

		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {

		for (y = 0;
		     y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					     sizeof(struct gfs2_meta_header) +
					     y * sizeof(struct gfs2_quota_change));

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;
			qd->qd_last_touched = jiffies;

			spin_lock(&sdp->sd_quota_spin);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&sdp->sd_quota_spin);

	fs_info(sdp, "found %u quota changes\n", found);

	gfs2_quota_cleanup(sdp);

void gfs2_quota_scan(struct gfs2_sbd *sdp)
	struct gfs2_quota_data *qd, *safe;
	LIST_HEAD(dead);

	spin_lock(&sdp->sd_quota_spin);
	list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
		if (!qd->qd_count &&
		    time_after_eq(jiffies, qd->qd_last_touched +
				  gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
			list_move(&qd->qd_list, &dead);
			gfs2_assert_warn(sdp,
					 atomic_read(&sdp->sd_quota_count) > 0);
			atomic_dec(&sdp->sd_quota_count);

	spin_unlock(&sdp->sd_quota_spin);

	while (!list_empty(&dead)) {
		qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;

	spin_lock(&sdp->sd_quota_spin);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (qd->qd_count > 1 ||
		    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&sdp->sd_quota_spin);
			spin_lock(&sdp->sd_quota_spin);

		list_del(&qd->qd_list);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&sdp->sd_quota_spin);

		if (!qd->qd_count) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);

		spin_lock(&sdp->sd_quota_spin);

	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);