/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a
 * quota-check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the
 * frequency of quota file syncs increases as the user moves closer to their
 * limit.  The more frequent the syncs, the more accurate the quota
 * enforcement, but that means that there is more contention between the nodes
 * for the quota file.  The default value is one.  This sets the maximum
 * theoretical quota overrun (with an infinite number of nodes, each with
 * infinite bandwidth) to twice the user's limit.  (In practice, the maximum
 * overrun you see should be much less.)  A "quota_scale" number greater than
 * one makes quota syncs more frequent and reduces the maximum overrun.
 * Numbers less than one (but greater than zero) make quota syncs less
 * frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
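
/*
 * Concretely, need_sync() below implements the "quota_scale" behaviour
 * (num/den being the scale factor) as, roughly:
 *
 *   sync once  qb_value + qd_change * gfs2_jindex_size(sdp) * num / den
 *                  >= qb_limit
 *
 * i.e. a node's local, unsynced change is scaled by the number of journals
 * (the number of nodes that could be accumulating the same kind of change)
 * before being compared against the limit, so a larger scale factor forces
 * a sync while the local change is still small.
 */
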
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/tty.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>

#include "lm_interface.h"
#include "ops_address.h"
static uint64_t qd2offset(struct gfs2_quota_data *qd)
{
        offset = 2 * (uint64_t)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
        offset *= sizeof(struct gfs2_quota);
static int qd_alloc(struct gfs2_sbd *sdp, int user, uint32_t id,
                    struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd;

        qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);

        set_bit(QDF_USER, &qd->qd_flags);

        error = gfs2_glock_get(sdp, 2 * (uint64_t)id + !user,
                               &gfs2_quota_glops, CREATE, &qd->qd_gl);

        error = gfs2_lvb_hold(qd->qd_gl);
        gfs2_glock_put(qd->qd_gl);
static int qd_get(struct gfs2_sbd *sdp, int user, uint32_t id, int create,
                  struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL, *new_qd = NULL;

        spin_lock(&sdp->sd_quota_spin);
        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                if (qd->qd_id == id &&
                    !test_bit(QDF_USER, &qd->qd_flags) == !user) {

                list_add(&qd->qd_list, &sdp->sd_quota_list);
                atomic_inc(&sdp->sd_quota_count);

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_lvb_unhold(new_qd->qd_gl);

        error = qd_alloc(sdp, user, id, &new_qd);
static void qd_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_count);
        spin_unlock(&sdp->sd_quota_spin);
static void qd_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_count);
        qd->qd_last_touched = jiffies;
        spin_unlock(&sdp->sd_quota_spin);
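
/*
 * slot_get - claim a slot in the per-node quota_change file
 *
 * If the quota_data already owns a slot, just bump qd_slot_count.
 * Otherwise scan sd_quota_bitmap (one bit per slot, in PAGE_SIZE chunks)
 * for the first clear bit, mark it used, and record it in qd->qd_slot.
 */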
static int slot_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        unsigned int c, o = 0, b;
        unsigned char byte = 0;

        spin_lock(&sdp->sd_quota_spin);

        if (qd->qd_slot_count++) {
                spin_unlock(&sdp->sd_quota_spin);

        for (c = 0; c < sdp->sd_quota_chunks; c++)
                for (o = 0; o < PAGE_SIZE; o++) {
                        byte = sdp->sd_quota_bitmap[c][o];

        for (b = 0; b < 8; b++)
                if (!(byte & (1 << b)))

        qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

        if (qd->qd_slot >= sdp->sd_quota_slots)

        sdp->sd_quota_bitmap[c][o] |= 1 << b;

        spin_unlock(&sdp->sd_quota_spin);

        spin_unlock(&sdp->sd_quota_spin);
static void slot_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_slot_count);
        spin_unlock(&sdp->sd_quota_spin);
static void slot_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_slot_count);
        if (!--qd->qd_slot_count) {
                gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);

        spin_unlock(&sdp->sd_quota_spin);
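
/*
 * bh_get - map a quota_data to its buffer in the quota_change file
 *
 * Translates qd->qd_slot into a (block, offset) pair in the per-node
 * quota_change file, reads that block, and points qd->qd_bh_qc at this
 * ID's struct gfs2_quota_change within it.  Reference counted through
 * qd_bh_count.
 */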
static int bh_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = sdp->sd_qc_inode->u.generic_ip;
        unsigned int block, offset;
        struct buffer_head *bh;

        mutex_lock(&sdp->sd_quota_mutex);

        if (qd->qd_bh_count++) {
                mutex_unlock(&sdp->sd_quota_mutex);

        block = qd->qd_slot / sdp->sd_qc_per_block;
        offset = qd->qd_slot % sdp->sd_qc_per_block;

        error = gfs2_block_map(ip->i_vnode, block, &new, &dblock, &boundary);

        error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT, &bh);

        if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))

        qd->qd_bh_qc = (struct gfs2_quota_change *)
                       (bh->b_data + sizeof(struct gfs2_meta_header) +
                        offset * sizeof(struct gfs2_quota_change));

        mutex_unlock(&sdp->sd_quota_mutex);

        mutex_unlock(&sdp->sd_quota_mutex);
static void bh_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_assert(sdp, qd->qd_bh_count);
        if (!--qd->qd_bh_count) {

        mutex_unlock(&sdp->sd_quota_mutex);
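
/*
 * qd_fish - find a quota_data that needs syncing to the quota file
 *
 * Picks the first entry on sd_quota_list that has a pending change
 * (QDF_CHANGE), is not already being synced (QDF_LOCKED), and has not yet
 * been synced in the current generation; marks it QDF_LOCKED and snapshots
 * the amount to sync in qd_change_sync.
 */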
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)

        spin_lock(&sdp->sd_quota_spin);

        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
                    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
                    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)

                list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

                set_bit(QDF_LOCKED, &qd->qd_flags);
                gfs2_assert_warn(sdp, qd->qd_count);

                qd->qd_change_sync = qd->qd_change;
                gfs2_assert_warn(sdp, qd->qd_slot_count);

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, qd->qd_change_sync);

        clear_bit(QDF_LOCKED, &qd->qd_flags);
static int qd_trylock(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)

        spin_lock(&sdp->sd_quota_spin);

        if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
            !test_bit(QDF_CHANGE, &qd->qd_flags)) {
                spin_unlock(&sdp->sd_quota_spin);

        list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

        set_bit(QDF_LOCKED, &qd->qd_flags);
        gfs2_assert_warn(sdp, qd->qd_count);

        qd->qd_change_sync = qd->qd_change;
        gfs2_assert_warn(sdp, qd->qd_slot_count);

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, qd->qd_change_sync);

        clear_bit(QDF_LOCKED, &qd->qd_flags);
static void qd_unlock(struct gfs2_quota_data *qd)
{
        gfs2_assert_warn(qd->qd_gl->gl_sbd,
                         test_bit(QDF_LOCKED, &qd->qd_flags));
        clear_bit(QDF_LOCKED, &qd->qd_flags);
static int qdsb_get(struct gfs2_sbd *sdp, int user, uint32_t id, int create,
                    struct gfs2_quota_data **qdp)
{
        error = qd_get(sdp, user, id, create, qdp);

        error = slot_get(*qdp);

        error = bh_get(*qdp);

static void qdsb_put(struct gfs2_quota_data *qd)
int gfs2_quota_hold(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data **qd = al->al_qd;

        if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
            gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)

        error = qdsb_get(sdp, QUOTA_USER, ip->i_di.di_uid, CREATE, qd);

        error = qdsb_get(sdp, QUOTA_GROUP, ip->i_di.di_gid, CREATE, qd);

        if (uid != NO_QUOTA_CHANGE && uid != ip->i_di.di_uid) {
                error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);

        if (gid != NO_QUOTA_CHANGE && gid != ip->i_di.di_gid) {
                error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);

        gfs2_quota_unhold(ip);
void gfs2_quota_unhold(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;

        gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

        for (x = 0; x < al->al_qd_num; x++) {
                qdsb_put(al->al_qd[x]);
static int sort_qd(const void *a, const void *b)
{
        struct gfs2_quota_data *qd_a = *(struct gfs2_quota_data **)a;
        struct gfs2_quota_data *qd_b = *(struct gfs2_quota_data **)b;

        if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
            !test_bit(QDF_USER, &qd_b->qd_flags)) {
                if (test_bit(QDF_USER, &qd_a->qd_flags))

        if (qd_a->qd_id < qd_b->qd_id)

        else if (qd_a->qd_id > qd_b->qd_id)
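
/*
 * do_qc - record a local quota change tag
 *
 * Folds "change" into this ID's entry in the per-node quota_change file as
 * part of the current transaction.  The first nonzero change sets QDF_CHANGE
 * and pins the quota_data (reference, slot and buffer held); a change that
 * cancels back to zero clears QDF_CHANGE and releases them again.
 */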
static void do_qc(struct gfs2_quota_data *qd, int64_t change)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = sdp->sd_qc_inode->u.generic_ip;
        struct gfs2_quota_change *qc = qd->qd_bh_qc;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

        if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {

                if (test_bit(QDF_USER, &qd->qd_flags))
                        qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
                qc->qc_id = cpu_to_be32(qd->qd_id);

        x = be64_to_cpu(x) + change;
        qc->qc_change = cpu_to_be64(x);

        spin_lock(&sdp->sd_quota_spin);

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
        clear_bit(QDF_CHANGE, &qd->qd_flags);

        } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {

        mutex_unlock(&sdp->sd_quota_mutex);
/*
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                             int64_t change, struct gfs2_quota_data *qd)
{
        struct inode *inode = ip->i_vnode;
        struct address_space *mapping = inode->i_mapping;
        unsigned long index = loc >> PAGE_CACHE_SHIFT;
        unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
        unsigned blocksize, iblock, pos;
        struct buffer_head *bh;

        page = grab_cache_page(mapping, index);

        blocksize = inode->i_sb->s_blocksize;
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);

        bh = page_buffers(page);
        while (offset >= pos) {
                bh = bh->b_this_page;

        if (!buffer_mapped(bh)) {
                gfs2_get_block(inode, iblock, bh, 1);
                if (!buffer_mapped(bh))

        if (PageUptodate(page))
                set_buffer_uptodate(bh);

        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ, 1, &bh);

                if (!buffer_uptodate(bh))

        gfs2_trans_add_bh(ip->i_gl, bh, 0);

        kaddr = kmap_atomic(page, KM_USER0);
        ptr = (__be64 *)(kaddr + offset);
        value = *ptr = cpu_to_be64(be64_to_cpu(*ptr) + change);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);

        qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);

        qd->qd_qb.qb_limit = cpu_to_be64(q.qu_limit);
        qd->qd_qb.qb_warn = cpu_to_be64(q.qu_warn);

        qd->qd_qb.qb_value = cpu_to_be64(value);

        page_cache_release(page);
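
/*
 * do_sync - write a batch of quota changes into the on-disk quota file
 *
 * Takes the quota glocks for the whole batch (sorted, to keep the lock
 * ordering consistent between nodes), builds one transaction covering every
 * ID, folds each qd_change_sync into the quota file through
 * gfs2_adjust_quota(), and cancels the matching tag in the per-node
 * quota_change file with do_qc(-qd_change_sync).
 */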
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
        struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
        struct gfs2_inode *ip = sdp->sd_quota_inode->u.generic_ip;
        unsigned int data_blocks, ind_blocks;
        struct file_ra_state ra_state;
        struct gfs2_holder *ghs, i_gh;
        struct gfs2_quota_data *qd;
        unsigned int nalloc = 0;
        struct gfs2_alloc *al = NULL;

        gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                               &data_blocks, &ind_blocks);

        ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);

        sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
        for (qx = 0; qx < num_qd; qx++) {
                error = gfs2_glock_nq_init(qda[qx]->qd_gl,
                                           GL_NOCACHE, &ghs[qx]);

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);

        for (x = 0; x < num_qd; x++) {

                offset = qd2offset(qda[x]);
                error = gfs2_write_alloc_required(ip, offset,
                                                  sizeof(struct gfs2_quota),

        al = gfs2_alloc_get(ip);

        al->al_requested = nalloc * (data_blocks + ind_blocks);

        error = gfs2_inplace_reserve(ip);

        error = gfs2_trans_begin(sdp,
                                 al->al_rgd->rd_ri.ri_length +
                                 num_qd * data_blocks +
                                 nalloc * ind_blocks +
                                 RES_DINODE + num_qd +

        error = gfs2_trans_begin(sdp,
                                 num_qd * data_blocks +
                                 RES_DINODE + num_qd, 0);

        file_ra_state_init(&ra_state, ip->i_vnode->i_mapping);
        for (x = 0; x < num_qd; x++) {

                offset = qd2offset(qd);
                error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
                                          (struct gfs2_quota_data *)

                do_qc(qd, -qd->qd_change_sync);

        gfs2_inplace_release(ip);

        gfs2_glock_dq_uninit(&i_gh);

        gfs2_glock_dq_uninit(&ghs[qx]);

        gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
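
/*
 * do_glock - acquire a quota glock, refreshing the LVB when necessary
 *
 * A shared hold is normally enough, since the lock's LVB caches the quota's
 * limit, warn value and current value.  If the LVB has never been
 * initialized (bad magic) or force_refresh is set, the lock is retaken
 * exclusively, the struct gfs2_quota is read from the quota file, and the
 * LVB is rewritten.
 */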
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
                    struct gfs2_holder *q_gh)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = sdp->sd_quota_inode->u.generic_ip;
        struct gfs2_holder i_gh;
        char buf[sizeof(struct gfs2_quota)];
        struct file_ra_state ra_state;

        file_ra_state_init(&ra_state, sdp->sd_quota_inode->i_mapping);

        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);

        gfs2_quota_lvb_in(&qd->qd_qb, qd->qd_gl->gl_lvb);

        if (force_refresh || qd->qd_qb.qb_magic != GFS2_MAGIC) {

                gfs2_glock_dq_uninit(q_gh);
                error = gfs2_glock_nq_init(qd->qd_gl,
                                           LM_ST_EXCLUSIVE, GL_NOCACHE,

                error = gfs2_glock_nq_init(ip->i_gl,

                memset(buf, 0, sizeof(struct gfs2_quota));

                error = gfs2_internal_read(ip,

                                           sizeof(struct gfs2_quota));

                gfs2_glock_dq_uninit(&i_gh);

                gfs2_quota_in(&q, buf);

                memset(&qd->qd_qb, 0, sizeof(struct gfs2_quota_lvb));
                qd->qd_qb.qb_magic = GFS2_MAGIC;
                qd->qd_qb.qb_limit = q.qu_limit;
                qd->qd_qb.qb_warn = q.qu_warn;
                qd->qd_qb.qb_value = q.qu_value;

                gfs2_quota_lvb_out(&qd->qd_qb, qd->qd_gl->gl_lvb);

                if (gfs2_glock_is_blocking(qd->qd_gl)) {
                        gfs2_glock_dq_uninit(q_gh);

        gfs2_glock_dq_uninit(&i_gh);

        gfs2_glock_dq_uninit(q_gh);
int gfs2_quota_lock(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;

        gfs2_quota_hold(ip, uid, gid);

        if (capable(CAP_SYS_RESOURCE) ||
            sdp->sd_args.ar_quota != GFS2_QUOTA_ON)

        sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),

        for (x = 0; x < al->al_qd_num; x++) {
                error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);

        set_bit(GIF_QD_LOCKED, &ip->i_flags);

        gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
        gfs2_quota_unhold(ip);
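
/*
 * need_sync - should this quota_data's local change be synced now?
 *
 * The local, unsynced change is scaled by the journal count and by
 * quota_scale (num/den); a sync is called for once the cached value plus
 * the scaled change would cross the limit.  A zero limit means no sync is
 * ever needed.
 */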
static int need_sync(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_tune *gt = &sdp->sd_tune;
        unsigned int num, den;

        if (!qd->qd_qb.qb_limit)

        spin_lock(&sdp->sd_quota_spin);
        value = qd->qd_change;
        spin_unlock(&sdp->sd_quota_spin);

        spin_lock(&gt->gt_spin);
        num = gt->gt_quota_scale_num;
        den = gt->gt_quota_scale_den;
        spin_unlock(&gt->gt_spin);

        else if (qd->qd_qb.qb_value >= (int64_t)qd->qd_qb.qb_limit)

        value *= gfs2_jindex_size(sdp) * num;

        value += qd->qd_qb.qb_value;
        if (value < (int64_t)qd->qd_qb.qb_limit)
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data *qda[4];
        unsigned int count = 0;

        if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))

        for (x = 0; x < al->al_qd_num; x++) {
                struct gfs2_quota_data *qd;

                sync = need_sync(qd);

                gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

                if (sync && qd_trylock(qd))

        for (x = 0; x < count; x++)

        gfs2_quota_unhold(ip);
static int print_message(struct gfs2_quota_data *qd, char *type)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        line = kmalloc(MAX_LINE, GFP_KERNEL);

        len = snprintf(line, MAX_LINE-1,
                       "GFS2: fsid=%s: quota %s for %s %u\r\n",
                       sdp->sd_fsname, type,
                       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",

        line[MAX_LINE-1] = 0;

        if (current->signal) { /* Is this test still required? */
                tty_write_message(current->signal->tty, line);
int gfs2_quota_check(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data *qd;

        if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))

        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)

        for (x = 0; x < al->al_qd_num; x++) {

                if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))

                value = qd->qd_qb.qb_value;
                spin_lock(&sdp->sd_quota_spin);
                value += qd->qd_change;
                spin_unlock(&sdp->sd_quota_spin);

                if (qd->qd_qb.qb_limit && (int64_t)qd->qd_qb.qb_limit < value) {
                        print_message(qd, "exceeded");

                } else if (qd->qd_qb.qb_warn &&
                           (int64_t)qd->qd_qb.qb_warn < value &&
                           time_after_eq(jiffies, qd->qd_last_warn +
                                         gfs2_tune_get(sdp,
                                                       gt_quota_warn_period) * HZ)) {
                        error = print_message(qd, "warning");
                        qd->qd_last_warn = jiffies;
void gfs2_quota_change(struct gfs2_inode *ip, int64_t change,
                       uint32_t uid, uint32_t gid)
{
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data *qd;
        unsigned int found = 0;

        if (gfs2_assert_warn(ip->i_sbd, change))

        if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)

        for (x = 0; x < al->al_qd_num; x++) {

                if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
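
/*
 * gfs2_quota_sync - sync all pending local quota changes to the quota file
 *
 * Bumps the sync generation, then repeatedly fishes out up to
 * quota_simul_sync dirty quota_data structures at a time and writes each
 * batch with do_sync(), until no syncable entries remain.
 */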
int gfs2_quota_sync(struct gfs2_sbd *sdp)
{
        struct gfs2_quota_data **qda;
        unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
        unsigned int num_qd;

        sdp->sd_quota_sync_gen++;

        qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);

                error = qd_fish(sdp, qda + num_qd);
                if (error || !qda[num_qd])
                if (++num_qd == max_qd)

                error = do_sync(num_qd, qda);

                for (x = 0; x < num_qd; x++)
                        qda[x]->qd_sync_gen = sdp->sd_quota_sync_gen;

                for (x = 0; x < num_qd; x++)

        } while (!error && num_qd == max_qd);
int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, uint32_t id)
{
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;

        error = qd_get(sdp, user, id, CREATE, &qd);

        error = do_glock(qd, FORCE, &q_gh);

        gfs2_glock_dq_uninit(&q_gh);
int gfs2_quota_read(struct gfs2_sbd *sdp, int user, uint32_t id,
                    struct gfs2_quota *q)
{
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;

        if (((user) ? (id != current->fsuid) : (!in_group_p(id))) &&
            !capable(CAP_SYS_ADMIN))

        error = qd_get(sdp, user, id, CREATE, &qd);

        error = do_glock(qd, NO_FORCE, &q_gh);

        memset(q, 0, sizeof(struct gfs2_quota));
        q->qu_limit = qd->qd_qb.qb_limit;
        q->qu_warn = qd->qd_qb.qb_warn;
        q->qu_value = qd->qd_qb.qb_value;

        spin_lock(&sdp->sd_quota_spin);
        q->qu_value += qd->qd_change;
        spin_unlock(&sdp->sd_quota_spin);

        gfs2_glock_dq_uninit(&q_gh);
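
/*
 * gfs2_quota_init - build the in-core quota state at mount time
 *
 * Sizes and allocates the slot bitmap to match the per-node quota_change
 * file, then scans that file: each nonzero change found (presumably left
 * over from before an unclean shutdown) gets a quota_data with QDF_CHANGE
 * set and its slot marked, so the change will be synced to the quota file
 * later.
 */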
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = sdp->sd_qc_inode->u.generic_ip;
        unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
        unsigned int x, slot = 0;
        unsigned int found = 0;
        uint32_t extlen = 0;

        if (!ip->i_di.di_size ||
            ip->i_di.di_size > (64 << 20) ||
            ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
                gfs2_consist_inode(ip);

        sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
        sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

        sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
                                       sizeof(unsigned char *), GFP_KERNEL);
        if (!sdp->sd_quota_bitmap)

        for (x = 0; x < sdp->sd_quota_chunks; x++) {
                sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
                if (!sdp->sd_quota_bitmap[x])

        for (x = 0; x < blocks; x++) {
                struct buffer_head *bh;

                error = gfs2_extent_map(ip->i_vnode, x, &new, &dblock, &extlen);

                gfs2_meta_ra(ip->i_gl, dblock, extlen);
                error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT,

                if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {

                     y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;

                        struct gfs2_quota_change qc;
                        struct gfs2_quota_data *qd;

                        gfs2_quota_change_in(&qc, bh->b_data +
                                             sizeof(struct gfs2_meta_header) +
                                             y * sizeof(struct gfs2_quota_change));

                        error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),

                        set_bit(QDF_CHANGE, &qd->qd_flags);
                        qd->qd_change = qc.qc_change;

                        qd->qd_slot_count = 1;
                        qd->qd_last_touched = jiffies;

                        spin_lock(&sdp->sd_quota_spin);
                        gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        spin_unlock(&sdp->sd_quota_spin);

        fs_info(sdp, "found %u quota changes\n", found);

        gfs2_quota_cleanup(sdp);
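
/*
 * gfs2_quota_scan - reclaim quota_data structures that have gone idle
 *
 * Moves entries that are unreferenced and untouched for quota_cache_secs
 * onto a private "dead" list under the spinlock, then frees them outside it.
 */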
void gfs2_quota_scan(struct gfs2_sbd *sdp)
{
        struct gfs2_quota_data *qd, *safe;

        spin_lock(&sdp->sd_quota_spin);
        list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
                if (!qd->qd_count &&
                    time_after_eq(jiffies, qd->qd_last_touched +
                                  gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
                        list_move(&qd->qd_list, &dead);
                        gfs2_assert_warn(sdp,
                                         atomic_read(&sdp->sd_quota_count) > 0);
                        atomic_dec(&sdp->sd_quota_count);

        spin_unlock(&sdp->sd_quota_spin);

        while (!list_empty(&dead)) {
                qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
                list_del(&qd->qd_list);

                gfs2_assert_warn(sdp, !qd->qd_change);
                gfs2_assert_warn(sdp, !qd->qd_slot_count);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_lvb_unhold(qd->qd_gl);
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
        struct list_head *head = &sdp->sd_quota_list;
        struct gfs2_quota_data *qd;

        spin_lock(&sdp->sd_quota_spin);
        while (!list_empty(head)) {
                qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

                if (qd->qd_count > 1 ||
                    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
                        list_move(&qd->qd_list, head);
                        spin_unlock(&sdp->sd_quota_spin);

                        spin_lock(&sdp->sd_quota_spin);

                list_del(&qd->qd_list);
                atomic_dec(&sdp->sd_quota_count);
                spin_unlock(&sdp->sd_quota_spin);

                if (!qd->qd_count) {
                        gfs2_assert_warn(sdp, !qd->qd_change);
                        gfs2_assert_warn(sdp, !qd->qd_slot_count);

                        gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
                        gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_lvb_unhold(qd->qd_gl);

                spin_lock(&sdp->sd_quota_spin);

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

        if (sdp->sd_quota_bitmap) {
                for (x = 0; x < sdp->sd_quota_chunks; x++)
                        kfree(sdp->sd_quota_bitmap[x]);
                kfree(sdp->sd_quota_bitmap);