2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License v.2.
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/kthread.h>
16 #include <asm/semaphore.h>
/*
 * munge_ondisk - write one unlinked tag into its slot in the on-disk
 * unlinked-tag file (sd_ut_inode).
 * @sdp: the filesystem
 * @slot: index of the tag within the tag file
 * @ut: the tag contents to write out
 *
 * NOTE(review): the embedded line numbers jump (27, 30-31, 33-34, 37,
 * 39-40, 42-43, 45-48, 55-61 are absent), so declarations, error checks,
 * brelse() and the return are not visible in this excerpt — do not treat
 * this view as the complete function.
 */
25 static int munge_ondisk(struct gfs2_sbd *sdp, unsigned int slot,
26 struct gfs2_unlinked_tag *ut)
28 struct gfs2_inode *ip = sdp->sd_ut_inode;
29 unsigned int block, offset;
32 struct buffer_head *bh;
/* Which block of the tag file holds this slot, and where within it. */
35 block = slot / sdp->sd_ut_per_block;
36 offset = slot % sdp->sd_ut_per_block;
/* Map the logical block to a disk block, then read it in. */
38 error = gfs2_block_map(ip, block, &new, &dblock, NULL);
41 error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT, &bh);
/* Refuse to scribble on a buffer that is not unlinked-tag metadata. */
44 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_UT)) {
/* Serialize tag writes under sd_unlinked_mutex, journal the buffer,
   then serialize the tag at its offset past the metadata header. */
49 down(&sdp->sd_unlinked_mutex);
50 gfs2_trans_add_bh(ip->i_gl, bh);
51 gfs2_unlinked_tag_out(ut, bh->b_data +
52 sizeof(struct gfs2_meta_header) +
53 offset * sizeof(struct gfs2_unlinked_tag));
54 up(&sdp->sd_unlinked_mutex);
/*
 * ul_hash - put an unlinked structure onto the in-memory unlinked list.
 * Asserts the structure is still referenced (ul_count non-zero) and bumps
 * the filesystem-wide unlinked count, all under sd_unlinked_spin.
 * NOTE(review): lines 63 and 67 of the original are not visible here.
 */
62 static void ul_hash(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
64 spin_lock(&sdp->sd_unlinked_spin);
65 list_add(&ul->ul_list, &sdp->sd_unlinked_list);
66 gfs2_assert(sdp, ul->ul_count);
68 atomic_inc(&sdp->sd_unlinked_count);
69 spin_unlock(&sdp->sd_unlinked_spin);
/*
 * ul_unhash - take an unlinked structure off the in-memory unlinked list.
 * Mirror of ul_hash(): asserts more than one reference remains (the list
 * itself held one), warns if the global count would go negative, and
 * decrements it — all under sd_unlinked_spin.
 * NOTE(review): lines 73 and 77 of the original are not visible here.
 */
72 static void ul_unhash(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
74 spin_lock(&sdp->sd_unlinked_spin);
75 list_del_init(&ul->ul_list);
76 gfs2_assert(sdp, ul->ul_count > 1);
78 gfs2_assert_warn(sdp, atomic_read(&sdp->sd_unlinked_count) > 0);
79 atomic_dec(&sdp->sd_unlinked_count);
80 spin_unlock(&sdp->sd_unlinked_spin);
/*
 * ul_fish - pick an unlocked entry from the in-memory unlinked list.
 *
 * Bails out early on a read-only mount (the early-return statement for
 * the MS_RDONLY case is in a line not shown in this excerpt).  Walks the
 * list skipping entries already marked ULF_LOCKED; a chosen entry is
 * rotated to the tail (simple round-robin fairness) and claimed by
 * setting ULF_LOCKED.
 *
 * NOTE(review): lines 87-88, 90-91, 93, 95, 98-99, 101, 103-110 are
 * absent from this view — the reference counting and the function's
 * return paths are not visible.
 */
83 static struct gfs2_unlinked *ul_fish(struct gfs2_sbd *sdp)
85 struct list_head *head;
86 struct gfs2_unlinked *ul;
/* Nothing to deallocate on a read-only mount. */
89 if (sdp->sd_vfs->s_flags & MS_RDONLY)
92 spin_lock(&sdp->sd_unlinked_spin);
94 head = &sdp->sd_unlinked_list;
96 list_for_each_entry(ul, head, ul_list) {
/* Skip entries some other thread is already working on. */
97 if (test_bit(ULF_LOCKED, &ul->ul_flags))
/* Rotate the winner to the tail so the scan is round-robin. */
100 list_move_tail(&ul->ul_list, head);
102 set_bit(ULF_LOCKED, &ul->ul_flags);
111 spin_unlock(&sdp->sd_unlinked_spin);
117 * enforce_limit - limit the number of inodes waiting to be deallocated
118 * @sdp: the filesystem
/*
 * enforce_limit - limit the number of inodes waiting to be deallocated
 * @sdp: the filesystem
 *
 * If the unlinked count has reached the gt_ilimit tunable, try up to
 * gt_ilimit_tries times to deallocate entries, aiming to get at least
 * gt_ilimit_min done.  NOTE(review): the loop header, error declaration,
 * and several branch bodies (lines 126-127, 132-134, 136-137, 140-143,
 * 145-148) are not visible in this excerpt.
 */
123 static void enforce_limit(struct gfs2_sbd *sdp)
125 unsigned int tries = 0, min = 0;
/* Only throttle once the tunable ceiling has been reached. */
128 if (atomic_read(&sdp->sd_unlinked_count) >=
129 gfs2_tune_get(sdp, gt_ilimit)) {
130 tries = gfs2_tune_get(sdp, gt_ilimit_tries);
131 min = gfs2_tune_get(sdp, gt_ilimit_min);
/* Grab a locked entry and try to deallocate its inode. */
135 struct gfs2_unlinked *ul = ul_fish(sdp);
138 error = gfs2_inode_dealloc(sdp, ul);
139 gfs2_unlinked_put(sdp, ul);
/* error == 1 appears to be a non-fatal "try again" result — the
   handling between these branches is in lines not shown here. */
144 } else if (error != 1)
/*
 * ul_alloc - allocate and initialize a new gfs2_unlinked structure.
 * Returned zeroed, with its list head self-linked and ULF_LOCKED set so
 * the caller owns it exclusively.
 * NOTE(review): the kzalloc NULL-check and the lines setting ul_count /
 * returning (154, 156, 158-160) are not visible in this excerpt —
 * presumably a !ul check exists; confirm against the full source.
 */
149 static struct gfs2_unlinked *ul_alloc(struct gfs2_sbd *sdp)
151 struct gfs2_unlinked *ul;
153 ul = kzalloc(sizeof(struct gfs2_unlinked), GFP_KERNEL);
155 INIT_LIST_HEAD(&ul->ul_list);
157 set_bit(ULF_LOCKED, &ul->ul_flags);
/*
 * gfs2_unlinked_get - allocate a gfs2_unlinked structure and claim a free
 * slot for it in the on-disk tag bitmap.
 * @sdp: the filesystem
 * @ul: returns the new structure with ul_slot set to the claimed slot
 *
 * Scans the per-chunk bitmap (one PAGE_SIZE of bits per chunk) for the
 * first clear bit within sd_unlinked_slots, marks it used, and records
 * the slot number.  NOTE(review): lines 164, 167-173, 175, 179-185, 188,
 * 190, 192-193, 195, 197-200, 202-203 are absent — the ul_alloc call,
 * the "byte full" skip, loop exits and return values are not visible.
 */
163 int gfs2_unlinked_get(struct gfs2_sbd *sdp, struct gfs2_unlinked **ul)
165 unsigned int c, o = 0, b;
166 unsigned char byte = 0;
174 spin_lock(&sdp->sd_unlinked_spin);
/* Walk every byte of every bitmap chunk looking for a clear bit. */
176 for (c = 0; c < sdp->sd_unlinked_chunks; c++)
177 for (o = 0; o < PAGE_SIZE; o++) {
178 byte = sdp->sd_unlinked_bitmap[c][o];
186 for (b = 0; b < 8; b++)
187 if (!(byte & (1 << b)))
/* Slot number = chunk base + byte offset * 8 + bit. */
189 (*ul)->ul_slot = c * (8 * PAGE_SIZE) + o * 8 + b;
/* The bitmap may cover more bits than there are real slots. */
191 if ((*ul)->ul_slot >= sdp->sd_unlinked_slots)
194 sdp->sd_unlinked_bitmap[c][o] |= 1 << b;
196 spin_unlock(&sdp->sd_unlinked_spin);
/* Second unlock: this is a different (failure) path in the full code. */
201 spin_unlock(&sdp->sd_unlinked_spin);
/*
 * gfs2_unlinked_put - drop the caller's claim on an unlinked structure.
 * @sdp: the filesystem
 * @ul: the structure being released
 *
 * Clears ULF_LOCKED (warning if it was not set), and when the reference
 * count drops, releases the structure's bitmap slot.  NOTE(review): the
 * decrement/branch lines (209, 212-213, 216-217, 219) are not visible —
 * the two unlock lines below belong to different branches in the full
 * source; confirm against it.
 */
206 void gfs2_unlinked_put(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
208 gfs2_assert_warn(sdp, test_and_clear_bit(ULF_LOCKED, &ul->ul_flags));
210 spin_lock(&sdp->sd_unlinked_spin);
211 gfs2_assert(sdp, ul->ul_count);
/* Last reference: free the on-disk slot back to the bitmap. */
214 gfs2_icbit_munge(sdp, sdp->sd_unlinked_bitmap, ul->ul_slot, 0);
215 spin_unlock(&sdp->sd_unlinked_spin);
218 spin_unlock(&sdp->sd_unlinked_spin);
/*
 * gfs2_unlinked_ondisk_add - write a new tag to disk and hash it.
 * Caller must hold the ULF_LOCKED claim; the structure must not yet be
 * on the in-memory list (the ul_hash call on success is in lines not
 * shown in this excerpt).
 */
221 int gfs2_unlinked_ondisk_add(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
225 gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
226 gfs2_assert_warn(sdp, list_empty(&ul->ul_list));
228 error = munge_ondisk(sdp, ul->ul_slot, &ul->ul_ut);
/*
 * gfs2_unlinked_ondisk_munge - rewrite an existing tag's on-disk slot.
 * Same as gfs2_unlinked_ondisk_add() except the structure must already
 * be hashed (non-empty list linkage).
 */
235 int gfs2_unlinked_ondisk_munge(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
239 gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
240 gfs2_assert_warn(sdp, !list_empty(&ul->ul_list));
242 error = munge_ondisk(sdp, ul->ul_slot, &ul->ul_ut);
/*
 * gfs2_unlinked_ondisk_rm - clear a tag's on-disk slot.
 * Writes an all-zero tag over the slot (a zero ut_inum.no_addr marks the
 * slot unused — see the skip test in gfs2_unlinked_init).  The ul_unhash
 * call on success is in lines not shown in this excerpt.
 */
247 int gfs2_unlinked_ondisk_rm(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
249 struct gfs2_unlinked_tag ut;
252 gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
253 gfs2_assert_warn(sdp, !list_empty(&ul->ul_list));
255 memset(&ut, 0, sizeof(struct gfs2_unlinked_tag));
257 error = munge_ondisk(sdp, ul->ul_slot, &ut);
267 * gfs2_unlinked_dealloc - Go through the list of inodes to be deallocated
268 * @sdp: the filesystem
/*
 * gfs2_unlinked_dealloc - Go through the list of inodes to be deallocated
 * @sdp: the filesystem
 *
 * Repeatedly fishes entries off the unlinked list and deallocates their
 * inodes, giving up when a pass makes no progress or the daemon is asked
 * to stop.  NOTE(review): the loop structure, hit/strike bookkeeping and
 * return paths (lines 274-282, 284-285, 288-292, 294-295, 297-303,
 * 305-311) are absent from this view.
 */
273 int gfs2_unlinked_dealloc(struct gfs2_sbd *sdp)
275 unsigned int hits, strikes;
283 struct gfs2_unlinked *ul = ul_fish(sdp);
286 error = gfs2_inode_dealloc(sdp, ul);
287 gfs2_unlinked_put(sdp, ul);
/* error == 1 appears to be a non-fatal "couldn't dealloc now" result;
   the surrounding handling is in lines not shown here. */
293 } else if (error == 1) {
296 atomic_read(&sdp->sd_unlinked_count)) {
/* Stop when a pass achieved nothing or the kthread must exit. */
304 if (!hits || kthread_should_stop())
/*
 * gfs2_unlinked_init - read the on-disk unlinked-tag file at mount time
 * @sdp: the filesystem
 *
 * Validates the tag file's size, sizes the slot bitmap (one PAGE_SIZE
 * chunk per 8*PAGE_SIZE slots), then reads every block of the tag file,
 * deserializes each tag, and for every non-empty tag claims its bitmap
 * slot and hashes an in-memory structure.  On error the partially built
 * state is torn down via gfs2_unlinked_cleanup().
 *
 * NOTE(review): many lines are absent from this excerpt (319-322,
 * 327-328, 331-333, 336, 338-339, 343-345, 348-351, 353-355, 358-361,
 * 363-367, 369, 372, 377-387, 391-392, 394-402, 404-407) — error
 * handling, brelse, the y-loop header, ul construction and returns are
 * not visible.
 */
313 int gfs2_unlinked_init(struct gfs2_sbd *sdp)
315 struct gfs2_inode *ip = sdp->sd_ut_inode;
316 unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
317 unsigned int x, slot = 0;
318 unsigned int found = 0;
/* The tag file must be non-empty, at most 64 MB, and a whole number
   of filesystem blocks — anything else is on-disk corruption. */
323 if (!ip->i_di.di_size ||
324 ip->i_di.di_size > (64 << 20) ||
325 ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
326 gfs2_consist_inode(ip);
329 sdp->sd_unlinked_slots = blocks * sdp->sd_ut_per_block;
330 sdp->sd_unlinked_chunks = DIV_RU(sdp->sd_unlinked_slots, 8 * PAGE_SIZE);
/* Array of chunk pointers, then one zeroed PAGE_SIZE bitmap per chunk. */
334 sdp->sd_unlinked_bitmap = kcalloc(sdp->sd_unlinked_chunks,
335 sizeof(unsigned char *),
337 if (!sdp->sd_unlinked_bitmap)
340 for (x = 0; x < sdp->sd_unlinked_chunks; x++) {
341 sdp->sd_unlinked_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
342 if (!sdp->sd_unlinked_bitmap[x])
/* Read every block of the tag file, with readahead across extents. */
346 for (x = 0; x < blocks; x++) {
347 struct buffer_head *bh;
352 error = gfs2_block_map(ip, x, &new, &dblock, &extlen);
356 gfs2_meta_ra(ip->i_gl, dblock, extlen);
357 error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT,
362 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_UT)) {
/* Walk each tag in the block, stopping at the last real slot. */
368 y < sdp->sd_ut_per_block && slot < sdp->sd_unlinked_slots;
370 struct gfs2_unlinked_tag ut;
371 struct gfs2_unlinked *ul;
373 gfs2_unlinked_tag_in(&ut, bh->b_data +
374 sizeof(struct gfs2_meta_header) +
375 y * sizeof(struct gfs2_unlinked_tag));
/* A zero inode address marks an unused slot — skip it. */
376 if (!ut.ut_inum.no_addr)
/* Occupied slot: mark it used in the bitmap and hash the entry
   (the ul setup/ul_hash lines are not visible in this excerpt). */
388 spin_lock(&sdp->sd_unlinked_spin);
389 gfs2_icbit_munge(sdp, sdp->sd_unlinked_bitmap, slot, 1);
390 spin_unlock(&sdp->sd_unlinked_spin);
393 gfs2_unlinked_put(sdp, ul);
403 fs_info(sdp, "found %u unlinked inodes\n", found);
/* Error path: undo everything built so far. */
408 gfs2_unlinked_cleanup(sdp);
413 * gfs2_unlinked_cleanup - get rid of any extra struct gfs2_unlinked structures
414 * @sdp: the filesystem
/*
 * gfs2_unlinked_cleanup - get rid of any extra struct gfs2_unlinked structures
 * @sdp: the filesystem
 *
 * Drains the in-memory unlinked list.  Entries still referenced elsewhere
 * (ul_count > 1) are rotated to the tail and the lock is dropped briefly
 * to let the other holder finish; single-reference entries are unlinked,
 * counted down, sanity-checked, and freed (the kfree and the loop's
 * re-check are in lines not visible here: 422-423, 427, 431, 433-435,
 * 438, 441-442, 444, 446, and 451+ past the end of this excerpt).
 * Finally the slot bitmap chunks and the chunk-pointer array are freed.
 */
418 void gfs2_unlinked_cleanup(struct gfs2_sbd *sdp)
420 struct list_head *head = &sdp->sd_unlinked_list;
421 struct gfs2_unlinked *ul;
424 spin_lock(&sdp->sd_unlinked_spin);
425 while (!list_empty(head)) {
426 ul = list_entry(head->next, struct gfs2_unlinked, ul_list);
/* Someone else still holds a reference: park the entry at the tail
   and briefly release the lock so they can drop it. */
428 if (ul->ul_count > 1) {
429 list_move_tail(&ul->ul_list, head);
430 spin_unlock(&sdp->sd_unlinked_spin);
432 spin_lock(&sdp->sd_unlinked_spin);
/* Sole reference: detach and account for the disappearing entry. */
436 list_del_init(&ul->ul_list);
437 atomic_dec(&sdp->sd_unlinked_count);
439 gfs2_assert_warn(sdp, ul->ul_count == 1);
440 gfs2_assert_warn(sdp, !test_bit(ULF_LOCKED, &ul->ul_flags));
443 spin_unlock(&sdp->sd_unlinked_spin);
/* All entries gone — the global count must now be zero. */
445 gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_unlinked_count));
447 if (sdp->sd_unlinked_bitmap) {
448 for (x = 0; x < sdp->sd_unlinked_chunks; x++)
449 kfree(sdp->sd_unlinked_bitmap[x]);
450 kfree(sdp->sd_unlinked_bitmap);