/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
/* Must be kept in sync with the beginning of struct gfs2_glock */
struct glock_plug {
	struct list_head gl_list;
	unsigned long gl_flags;
};

struct greedy {
	struct gfs2_holder gr_gh;
	struct work_struct gr_work;
};
struct gfs2_gl_hash_bucket {
	struct list_head hb_list;
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);

#define GFS2_GL_HASH_SHIFT	13
#define GFS2_GL_HASH_SIZE	(1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK	(GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ	512
# else
#  define GL_HASH_LOCK_SZ	256
# endif
#endif
/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif
static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[(x) & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif
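
/*
 * Illustrative sketch (editor's addition, not in the original file): the
 * pattern every bucket access below follows.  gl_hash() picks the chain,
 * gl_lock_addr() picks the rwlock guarding it; because GL_HASH_LOCK_SZ is
 * a power of two no larger than GFS2_GL_HASH_SIZE, masking the bucket
 * index is enough to map many chains onto fewer locks:
 *
 *	unsigned int hash = gl_hash(sdp, &name);
 *	read_lock(gl_lock_addr(hash));
 *	... walk gl_hash_table[hash].hb_list ...
 *	read_unlock(gl_lock_addr(hash));
 */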
/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (flags & GL_EXACT)
		return 0;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}
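
/*
 * Illustrative examples (editor's addition, not in the original file) of
 * the rules above: an EXCLUSIVE hold satisfies a plain SHARED request, so
 *
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0) == 1
 *
 * while adding GL_EXACT demands an exact state match and the same call
 * returns 0; with LM_FLAG_ANY, any held state except LM_ST_UNLOCKED is
 * accepted.
 */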
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}
/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	kref_get(&gl->gl_ref);
}
/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
	struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
	struct gfs2_sbd *sdp = gl->gl_sbd;

	gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
	gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
	gfs2_assert(sdp, list_empty(&gl->gl_holders));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}
/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * Returns: 1 if the glock is released
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;

	write_lock(gl_lock_addr(gl->gl_hash));
	if (kref_put(&gl->gl_ref, kill_glock)) {
		list_del_init(&gl->gl_list);
		write_unlock(gl_lock_addr(gl->gl_hash));
		BUG_ON(spin_is_locked(&gl->gl_spin));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}
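
/*
 * Illustrative sketch (editor's addition, not in the original file): every
 * gfs2_glock_hold() must be balanced by a gfs2_glock_put(); the final put
 * unhashes and frees the glock:
 *
 *	gfs2_glock_hold(gl);
 *	... use gl ...
 *	gfs2_glock_put(gl);
 */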
/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
	int empty;
	spin_lock(&gl->gl_spin);
	empty = list_empty(head);
	spin_unlock(&gl->gl_spin);

	return empty;
}
/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the index of the bucket to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;

	list_for_each_entry(gl, &gl_hash_table[hash].hb_list, gl_list) {
		if (test_bit(GLF_PLUG, &gl->gl_flags))
			continue;
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		kref_get(&gl->gl_ref);

		return gl;
	}

	return NULL;
}
/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
					  const struct lm_lockname *name)
{
	unsigned int hash = gl_hash(sdp, name);
	struct gfs2_glock *gl;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, name);
	read_unlock(gl_lock_addr(hash));

	return gl;
}
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	int error;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	gl->gl_flags = 0;
	gl->gl_name = name;
	kref_init(&gl->gl_ref);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_hash = hash;
	gl->gl_owner = NULL;
	gl->gl_ip = 0;
	gl->gl_ops = glops;
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	gl->gl_vn = 0;
	gl->gl_stamp = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	lops_init_le(&gl->gl_le, &gfs2_glock_lops);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		list_add_tail(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}
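
/*
 * Illustrative sketch (editor's addition, not in the original file): a
 * typical lookup takes a reference that must later be dropped:
 *
 *	struct gfs2_glock *gl;
 *	int error = gfs2_glock_get(sdp, number, &gfs2_inode_glops,
 *				   CREATE, &gl);
 *	if (!error) {
 *		... use gl ...
 *		gfs2_glock_put(gl);
 *	}
 */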
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner = current;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	init_completion(&gh->gh_wait);

	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gfs2_glock_hold(gl);
}
/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gh->gh_iflags &= 1 << HIF_ALLOCED;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}
/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
}
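
/*
 * Illustrative sketch (editor's addition, not in the original file): the
 * holder lifecycle as used throughout this file:
 *
 *	struct gfs2_holder gh;
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	... queue it with gfs2_glock_nq(&gh), later gfs2_glock_dq(&gh) ...
 *	gfs2_holder_uninit(&gh);
 */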
/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: the allocation flags
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
					   unsigned int state,
					   int flags, gfp_t gfp_flags)
{
	struct gfs2_holder *gh;

	gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
	if (!gh)
		return NULL;

	gfs2_holder_init(gl, state, flags, gh);
	set_bit(HIF_ALLOCED, &gh->gh_iflags);
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	return gh;
}
/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 */

static void gfs2_holder_put(struct gfs2_holder *gh)
{
	gfs2_holder_uninit(gh);
	kfree(gh);
}
/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	set_bit(GLF_LOCK, &gl->gl_flags);
	complete(&gh->gh_wait);

	return 1;
}
/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);
			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	complete(&gh->gh_wait);

	return 0;
}
/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
		spin_lock(&gl->gl_spin);
	} else {
		gl->gl_req_gh = gh;
		set_bit(GLF_LOCK, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);

		if (gh->gh_state == LM_ST_UNLOCKED ||
		    gl->gl_state != LM_ST_EXCLUSIVE)
			glops->go_drop_th(gl);
		else
			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

		spin_lock(&gl->gl_spin);
	}

	return 0;
}
/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	clear_bit(GLF_GREEDY, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	gfs2_holder_uninit(gh);
	kfree(container_of(gh, struct greedy, gr_gh));

	spin_lock(&gl->gl_spin);

	return 0;
}
/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 */

static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters2) &&
			   !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
			gh = list_entry(gl->gl_waiters2.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
				blocked = rq_demote(gh);
			else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
				blocked = rq_greedy(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}
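
/*
 * Illustrative note (editor's addition, not in the original file): the
 * loop above drains the three waiter lists in strict priority order --
 * gl_waiters1 (glmutex requests), then gl_waiters2 (demote and greedy
 * callbacks, unless GLF_SKIP_WAITERS2 defers them), then gl_waiters3
 * (promotions) -- and stops as soon as one request blocks the queue.
 */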
/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	} else {
		gl->gl_owner = current;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
		complete(&gh.gh_wait);
	}
	spin_unlock(&gl->gl_spin);

	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}
/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		acquired = 0;
	} else {
		gl->gl_owner = current;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
	}
	spin_unlock(&gl->gl_spin);

	return acquired;
}
/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	gl->gl_owner = NULL;
	gl->gl_ip = 0;
	run_queue(gl);
	BUG_ON(!spin_is_locked(&gl->gl_spin));
	spin_unlock(&gl->gl_spin);
}
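
/*
 * Illustrative sketch (editor's addition, not in the original file): the
 * glmutex pattern used by the reclaim and scan paths below:
 *
 *	if (gfs2_glmutex_trylock(gl)) {
 *		... manipulate the glock exclusively ...
 *		gfs2_glmutex_unlock(gl);
 *	}
 */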
/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * Note: This may fail silently if we are out of memory.
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	struct gfs2_holder *gh, *new_gh = NULL;

restart:
	spin_lock(&gl->gl_spin);

	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
		    gl->gl_req_gh != gh) {
			if (gh->gh_state != state)
				gh->gh_state = LM_ST_UNLOCKED;
			goto out;
		}
	}

	if (new_gh) {
		list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
		new_gh = NULL;
	} else {
		spin_unlock(&gl->gl_spin);

		new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_KERNEL);
		if (!new_gh)
			return;
		set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
		set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

		goto restart;
	}

out:
	spin_unlock(&gl->gl_spin);

	if (new_gh)
		gfs2_holder_put(new_gh);
}
void gfs2_glock_inode_squish(struct inode *inode)
{
	struct gfs2_holder gh;
	struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
	gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
	set_bit(HIF_DEMOTE, &gh.gh_iflags);
	spin_lock(&gl->gl_spin);
	gfs2_assert(inode->i_sb->s_fs_info, list_empty(&gl->gl_holders));
	list_add_tail(&gh.gh_list, &gl->gl_waiters2);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
}
/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA | DIO_DATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, DIO_DATA);
	}

	/* Deal with each possible exit condition */

	if (!gh)
		gl->gl_stamp = jiffies;
	else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		spin_unlock(&gl->gl_spin);
	} else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		if (gl->gl_state == gh->gh_state ||
		    gl->gl_state == LM_ST_UNLOCKED) {
			gh->gh_error = 0;
		} else {
			if (gfs2_assert_warn(sdp, gh->gh_flags &
					(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
				fs_warn(sdp, "ret = 0x%.8X\n", ret);
			gh->gh_error = GLR_TRYFAILED;
		}
		spin_unlock(&gl->gl_spin);

		if (ret & LM_OUT_CANCELED)
			handle_callback(gl, LM_ST_UNLOCKED);

	} else if (ret & LM_OUT_CANCELED) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_CANCELED;
		spin_unlock(&gl->gl_spin);

	} else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		spin_lock(&gl->gl_spin);
		list_move_tail(&gh->gh_list, &gl->gl_holders);
		gh->gh_error = 0;
		set_bit(HIF_HOLDER, &gh->gh_iflags);
		spin_unlock(&gl->gl_spin);

		set_bit(HIF_FIRST, &gh->gh_iflags);

		op_done = 0;

	} else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_TRYFAILED;
		spin_unlock(&gl->gl_spin);

	} else {
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}
/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
		glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}
/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA | DIO_DATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_drop_bh)
		glops->go_drop_bh(gl);

	spin_lock(&gl->gl_spin);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}
/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
		glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}
/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh && !(gl->gl_req_gh &&
				       (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}
/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_for_completion(&gh->gh_wait);

	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}
static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner == owner)
			return gh;
	}

	return NULL;
}
/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner);

	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       gl->gl_name.ln_type, gl->gl_state);
		BUG();
	}

	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	set_bit(HIF_PROMOTE, &gh->gh_iflags);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
		dump_glock(gl);

	return error;
}
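
/*
 * Illustrative sketch (editor's addition, not in the original file): the
 * canonical acquire/release sequence built from the primitives above:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		... glock held; do the protected work ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 */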
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}
/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}
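
/*
 * Illustrative sketch (editor's addition, not in the original file):
 * asynchronous acquisition using GL_ASYNC with the poll/wait helpers above:
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);		(never fails for GL_ASYNC)
 *	while (!gfs2_glock_poll(&gh))
 *		... do other work ...
 *	error = gfs2_glock_wait(&gh);
 */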
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		gl->gl_stamp = jiffies;

		spin_lock(&gl->gl_spin);
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}
/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 */

static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
				int flags)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	spin_lock(&gl->gl_spin);

	if (test_bit(GLF_LOCK, &gl->gl_flags) || !list_empty(&gl->gl_holders) ||
	    !list_empty(&gl->gl_waiters1) || !list_empty(&gl->gl_waiters2) ||
	    !list_empty(&gl->gl_waiters3) ||
	    relaxed_state_ok(gl->gl_state, state, flags)) {
		spin_unlock(&gl->gl_spin);
		return;
	}

	set_bit(GLF_PREFETCH, &gl->gl_flags);
	set_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	glops->go_xmote_th(gl, state, flags);
}
static void greedy_work(void *data)
{
	struct greedy *gr = data;
	struct gfs2_holder *gh = &gr->gr_gh;
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

	if (glops->go_greedy)
		glops->go_greedy(gl);

	spin_lock(&gl->gl_spin);

	if (list_empty(&gl->gl_waiters2)) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
		gfs2_holder_uninit(gh);
		kfree(gr);
	} else {
		gfs2_glock_hold(gl);
		list_add_tail(&gh->gh_list, &gl->gl_waiters2);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
		gfs2_glock_put(gl);
	}
}
/**
 * gfs2_glock_be_greedy - keep a glock cached past callbacks for a while
 * @gl: the glock
 * @time: the delay (in jiffies) before greedy status is dropped
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
	struct greedy *gr;
	struct gfs2_holder *gh;

	if (!time || gl->gl_sbd->sd_args.ar_localcaching ||
	    test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
		return 1;

	gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
	if (!gr) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		return 1;
	}
	gh = &gr->gr_gh;

	gfs2_holder_init(gl, 0, 0, gh);
	set_bit(HIF_GREEDY, &gh->gh_iflags);
	INIT_WORK(&gr->gr_work, greedy_work, gr);

	set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
	schedule_delayed_work(&gr->gr_work, time);

	return 0;
}
/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}
/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}
/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if A > B, -1 if A < B, 0 if equal
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
	struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
	struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	struct lm_lockname *b = &gh_b->gh_gl->gl_name;
	int ret = 0;

	if (a->ln_number > b->ln_number)
		ret = 1;
	else if (a->ln_number < b->ln_number)
		ret = -1;
	else {
		if (gh_a->gh_state == LM_ST_SHARED &&
		    gh_b->gh_state == LM_ST_EXCLUSIVE)
			ret = 1;
		else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
			 (gh_b->gh_flags & GL_LOCAL_EXCL))
			ret = 1;
	}

	return ret;
}
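
/*
 * Illustrative note (editor's addition, not in the original file): sorting
 * holders by lock number (with EXCLUSIVE requests ordered before SHARED on
 * ties) gives every caller the same global acquisition order, which is
 * what makes nq_m_sync() below deadlock-free.
 */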
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array of holder pointers, sorted for acquisition order
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}
/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int *e;
	unsigned int x;
	int borked = 0, serious = 0;
	int error = 0;

	if (!num_gh)
		return 0;

	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	}

	e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++) {
		ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
		error = gfs2_glock_nq(&ghs[x]);
		if (error) {
			borked = 1;
			serious = error;
			num_gh = x;
			break;
		}
	}

	for (x = 0; x < num_gh; x++) {
		error = e[x] = glock_wait_internal(&ghs[x]);
		if (error) {
			borked = 1;
			if (error != GLR_TRYFAILED && error != GLR_CANCELED)
				serious = error;
		}
	}

	if (!borked) {
		kfree(e);
		return 0;
	}

	for (x = 0; x < num_gh; x++)
		if (!e[x])
			gfs2_glock_dq(&ghs[x]);

	if (serious)
		error = serious;
	else {
		for (x = 0; x < num_gh; x++)
			gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
					   &ghs[x]);
		error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
	}

	kfree(e);

	return error;
}
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}
/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}
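
/*
 * Illustrative sketch (editor's addition, not in the original file):
 * acquiring two glocks at once, e.g. for a two-inode operation:
 *
 *	struct gfs2_holder ghs[2];
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error)
 *		gfs2_glock_dq_m(2, ghs);
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */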
/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 */

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
			     const struct gfs2_glock_operations *glops,
			     unsigned int state, int flags)
{
	struct gfs2_glock *gl;
	int error;

	if (atomic_read(&sdp->sd_reclaim_count) <
	    gfs2_tune_get(sdp, gt_reclaim_limit)) {
		error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
		if (!error) {
			gfs2_glock_prefetch(gl, state, flags);
			gfs2_glock_put(gl);
		}
	}
}
/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}
/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}
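
/*
 * Illustrative sketch (editor's addition, not in the original file): LVB
 * references are counted, so hold/unhold must balance:
 *
 *	error = gfs2_lvb_hold(gl);
 *	if (!error) {
 *		... read or write gl->gl_lvb ...
 *		gfs2_lvb_unhold(gl);
 *	}
 */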
static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;

	gl = gfs2_glock_find(sdp, name);
	if (!gl)
		return;

	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, state);
	handle_callback(gl, state);

	spin_lock(&gl->gl_spin);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);
}
/**
 * gfs2_glock_cb - Callback used by locking module
 * @cb_data: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
	struct gfs2_sbd *sdp = cb_data;

	switch (type) {
	case LM_CB_NEED_E:
		blocking_cb(sdp, data, LM_ST_UNLOCKED);
		return;

	case LM_CB_NEED_D:
		blocking_cb(sdp, data, LM_ST_DEFERRED);
		return;

	case LM_CB_NEED_S:
		blocking_cb(sdp, data, LM_ST_SHARED);
		return;

	case LM_CB_ASYNC: {
		struct lm_async_cb *async = data;
		struct gfs2_glock *gl;

		gl = gfs2_glock_find(sdp, &async->lc_name);
		if (gfs2_assert_warn(sdp, gl))
			return;
		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
			gl->gl_req_bh(gl, async->lc_ret);
		gfs2_glock_put(gl);
		return;
	}

	case LM_CB_NEED_RECOVERY:
		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
		if (sdp->sd_recoverd_process)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
	}
}
/**
 * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
 *                          iopen glock from memory
 * @io_gl: the iopen glock
 * @state: the state into which the glock should be put
 */

void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
{
	if (state != LM_ST_UNLOCKED)
		return;
	/* FIXME: remove this? */
}
/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int demote = 1;

	if (test_bit(GLF_STICKY, &gl->gl_flags))
		demote = 0;
	else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
		demote = time_after_eq(jiffies, gl->gl_stamp +
				       gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
	else if (glops->go_demote_ok)
		demote = glops->go_demote_ok(gl);

	return demote;
}
/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	wake_up(&sdp->sd_reclaim_wq);
}
/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);

	if (gfs2_glmutex_trylock(gl)) {
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the index of the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct glock_plug plug;
	struct list_head *tmp;
	struct gfs2_glock *gl;
	int entries;

	/* Add "plug" to end of bucket list, work back up list from there */
	memset(&plug.gl_flags, 0, sizeof(unsigned long));
	set_bit(GLF_PLUG, &plug.gl_flags);

	write_lock(gl_lock_addr(hash));
	list_add(&plug.gl_list, &gl_hash_table[hash].hb_list);
	write_unlock(gl_lock_addr(hash));

	for (;;) {
		write_lock(gl_lock_addr(hash));

		for (;;) {
			tmp = plug.gl_list.next;

			if (tmp == &gl_hash_table[hash].hb_list) {
				list_del(&plug.gl_list);
				entries = !list_empty(&gl_hash_table[hash].hb_list);
				write_unlock(gl_lock_addr(hash));
				return entries;
			}
			gl = list_entry(tmp, struct gfs2_glock, gl_list);

			/* Move plug up list */
			list_move(&plug.gl_list, &gl->gl_list);

			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;
			if (gl->gl_sbd != sdp)
				continue;

			/* examiner() must glock_put() */
			gfs2_glock_hold(gl);

			break;
		}

		write_unlock(gl_lock_addr(hash));

		examiner(gl);
	}
}
/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 */

static void scan_glock(struct gfs2_glock *gl)
{
	if (gl->gl_ops == &gfs2_inode_glops)
		goto out;

	if (gfs2_glmutex_trylock(gl)) {
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED &&
		    demote_ok(gl))
			goto out_schedule;
		gfs2_glmutex_unlock(gl);
	}
out:
	gfs2_glock_put(gl);
	return;

out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
	gfs2_glock_put(gl);
}
/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		examine_bucket(scan_glock, sdp, x);
		cond_resched();
	}
}
/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 */

static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;

	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		spin_unlock(&sdp->sd_reclaim_lock);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	} else {
		spin_unlock(&sdp->sd_reclaim_lock);
	}

	if (gfs2_glmutex_trylock(gl)) {
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED);

		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;

	for (;;) {
		cont = 0;
		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
			if (examine_bucket(clear_glock, sdp, x))
				cont = 1;

		if (!wait || !cont)
			break;

		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		invalidate_inodes(sdp->sd_vfs);
		msleep(10);
	}
}
/*
 *  Diagnostic routines to help debug distributed deadlock
 */
/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  %s\n", str);
	printk(KERN_INFO "    owner = %ld\n",
	       (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
	printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
	printk(KERN_INFO "    gh_flags =");
	for (x = 0; x < 32; x++)
		if (gh->gh_flags & (1 << x))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "    error = %d\n", gh->gh_error);
	printk(KERN_INFO "    gh_iflags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gh->gh_iflags))
			printk(" %u", x);
	printk(" \n");
	print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);

	error = 0;

	return error;
}
/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  Inode:\n");
	printk(KERN_INFO "    num = %llu %llu\n",
	       (unsigned long long)ip->i_num.no_formal_ino,
	       (unsigned long long)ip->i_num.no_addr);
	printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode));
	printk(KERN_INFO "    i_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &ip->i_flags))
			printk(" %u", x);
	printk(" \n");

	error = 0;

	return error;
}
/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	unsigned int x;
	int error = -ENOBUFS;

	spin_lock(&gl->gl_spin);

	printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
	       (unsigned long long)gl->gl_name.ln_number);
	printk(KERN_INFO "  gl_flags =");
	for (x = 0; x < 32; x++) {
		if (test_bit(x, &gl->gl_flags))
			printk(" %u", x);
	}
	printk(" \n");
	printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
	printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
	printk(KERN_INFO "  gl_owner = %s\n", gl->gl_owner->comm);
	print_symbol(KERN_INFO "  gl_ip = %s\n", gl->gl_ip);
	printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
	printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
	printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
	printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
	printk(KERN_INFO "  le = %s\n",
	       (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
	printk(KERN_INFO "  reclaim = %s\n",
	       (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
	if (gl->gl_aspace)
		printk(KERN_INFO "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
		       gl->gl_aspace->i_mapping->nrpages);
	else
		printk(KERN_INFO "  aspace = no\n");
	printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
	if (gl->gl_req_gh) {
		error = dump_holder("Request", gl->gl_req_gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder("Holder", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
		error = dump_holder("Waiter1", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		error = dump_holder("Waiter2", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
		error = dump_holder("Waiter3", gh);
		if (error)
			goto out;
	}
	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
		if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
		    list_empty(&gl->gl_holders)) {
			error = dump_inode(gl->gl_object);
			if (error)
				goto out;
		} else {
			error = -ENOBUFS;
			printk(KERN_INFO "  Inode: busy\n");
		}
	}

	error = 0;

out:
	spin_unlock(&gl->gl_spin);
	return error;
}
/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the lockstate to the console.
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

		read_lock(gl_lock_addr(x));

		list_for_each_entry(gl, &gl_hash_table[x].hb_list, gl_list) {
			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;
			if (gl->gl_sbd != sdp)
				continue;

			error = dump_glock(gl);
			if (error)
				break;
		}

		read_unlock(gl_lock_addr(x));

		if (error)
			break;
	}

	return error;
}
int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&gl_hash_table[i].hb_list);
	}
#ifdef GL_HASH_LOCK_SZ
	for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
		rwlock_init(&gl_hash_locks[i]);
	}
#endif
	return 0;
}