#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
-#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
+#include <linux/lm_interface.h>
#include <asm/uaccess.h>
#include "gfs2.h"
-#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
void gfs2_glock_hold(struct gfs2_glock *gl)
{
- kref_get(&gl->gl_ref);
-}
-
-/* All work is done after the return from kref_put() so we
- can release the write_lock before the free. */
-
-static void kill_glock(struct kref *kref)
-{
- struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
- struct gfs2_sbd *sdp = gl->gl_sbd;
-
- gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
- gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
- gfs2_assert(sdp, list_empty(&gl->gl_holders));
- gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
- gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
- gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
+ atomic_inc(&gl->gl_ref);
}
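/*
 * A minimal sketch of the refcount pattern this hunk converts to,
 * using illustrative names (struct obj, obj_lock) rather than GFS2
 * symbols.  With struct kref the final put fired a release callback,
 * so teardown had to run inside kill_glock(); with a bare atomic_t
 * the caller of atomic_dec_and_test() sees the zero-crossing directly
 * and can unhash, unlock and free inline, as gfs2_glock_put() does
 * below.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/atomic.h>

struct obj {
	struct hlist_node list;		/* hash chain, like gl_list */
	u64 key;			/* lookup key, like gl_name */
	atomic_t ref;			/* refcount, like gl_ref */
};

static DEFINE_RWLOCK(obj_lock);		/* stand-in for the bucket lock */

static void obj_hold(struct obj *o)
{
	atomic_inc(&o->ref);			/* was kref_get() */
}

static int obj_put(struct obj *o)
{
	write_lock(&obj_lock);
	if (atomic_dec_and_test(&o->ref)) {	/* count just hit zero */
		hlist_del(&o->list);		/* unhash while still locked */
		write_unlock(&obj_lock);	/* now private: safe to free */
		kfree(o);			/* a slab-cache user would
						   call kmem_cache_free() */
		return 1;
	}
	write_unlock(&obj_lock);
	return 0;
}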
/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */
int gfs2_glock_put(struct gfs2_glock *gl)
{
int rv = 0;
+ struct gfs2_sbd *sdp = gl->gl_sbd;
write_lock(gl_lock_addr(gl->gl_hash));
- if (kref_put(&gl->gl_ref, kill_glock)) {
+ if (atomic_dec_and_test(&gl->gl_ref)) {
hlist_del(&gl->gl_list);
write_unlock(gl_lock_addr(gl->gl_hash));
BUG_ON(spin_is_locked(&gl->gl_spin));
+ gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
+ gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
+ gfs2_assert(sdp, list_empty(&gl->gl_holders));
+ gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
+ gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
+ gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
glock_free(gl);
rv = 1;
goto out;
if (gl->gl_sbd != sdp)
continue;
- kref_get(&gl->gl_ref);
+ atomic_inc(&gl->gl_ref);
return gl;
}
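/*
 * A sketch of the lookup path in this hunk, reusing the illustrative
 * obj type from above: the reference must be taken while the bucket
 * read_lock is still held, otherwise the object could reach a zero
 * refcount and be freed between the hash walk and the atomic_inc().
 */
static struct obj *obj_find(struct hlist_head *head, u64 key)
{
	struct hlist_node *h;
	struct obj *o;

	read_lock(&obj_lock);
	for (h = head->first; h; h = h->next) {
		o = hlist_entry(h, struct obj, list);
		if (o->key != key)
			continue;
		atomic_inc(&o->ref);	/* hold before dropping the lock */
		read_unlock(&obj_lock);
		return o;
	}
	read_unlock(&obj_lock);
	return NULL;
}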
gl->gl_flags = 0;
gl->gl_name = name;
- kref_init(&gl->gl_ref);
+ atomic_set(&gl->gl_ref, 1);
gl->gl_state = LM_ST_UNLOCKED;
gl->gl_hash = hash;
gl->gl_owner = NULL;
if (gl->gl_aspace)
gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
return error;
}
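/*
 * A sketch of the creation slow path, again with illustrative names:
 * atomic_set(&ref, 1) takes the creator's reference (the moral
 * equivalent of the old kref_init()), and every failure exit frees
 * back to the slab cache the object came from.  obj_cache and
 * obj_setup() are hypothetical stand-ins.
 */
static struct kmem_cache *obj_cache;	/* assumed created at init time */

static int obj_setup(struct obj *o)
{
	return 0;		/* placeholder for aspace setup and the like */
}

static struct obj *obj_new(u64 key)
{
	struct obj *o = kmem_cache_alloc(obj_cache, GFP_KERNEL);

	if (!o)
		return NULL;
	o->key = key;
	atomic_set(&o->ref, 1);		/* creator holds one reference */
	if (obj_setup(o))
		goto fail;
	return o;
fail:
	kmem_cache_free(obj_cache, o);	/* must match kmem_cache_alloc() */
	return NULL;
}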
gfs2_holder_uninit(gh);
kfree(container_of(gh, struct greedy, gr_gh));
	spin_lock(&gl->gl_spin);
return 0;
}
if (existing) {
print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
- printk(KERN_INFO "lock type : %d lock state : %d\n",
+ printk(KERN_INFO "lock type : %d lock state : %d\n",
existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
- printk(KERN_INFO "lock type : %d lock state : %d\n",
+ printk(KERN_INFO "lock type : %d lock state : %d\n",
gl->gl_name.ln_type, gl->gl_state);
BUG();
}
if (gh->gh_flags & LM_FLAG_PRIORITY)
list_add(&gh->gh_list, &gl->gl_waiters3);
else
	list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}
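/*
 * A minimal sketch of the two halves of the queueing logic above,
 * with illustrative names: a conflicting holder from the same owner
 * is a hard error, reported via the call sites recorded at init time,
 * while ordinary holders are queued FIFO and priority holders jump
 * the queue.  struct waiter, WAITER_PRIO and waiter_conflict() are
 * stand-ins, not GFS2 symbols.
 */
struct waiter {
	struct list_head list;
	unsigned long ip;		/* call site, like gh_ip */
	unsigned int flags;
};

#define WAITER_PRIO 0x00000001		/* stand-in for LM_FLAG_PRIORITY */

static void waiter_init(struct waiter *w, unsigned int flags)
{
	w->ip = (unsigned long)__builtin_return_address(0);
	w->flags = flags;
	INIT_LIST_HEAD(&w->list);
}

static void waiter_conflict(struct waiter *old, struct waiter *new)
{
	print_symbol(KERN_WARNING "original: %s\n", old->ip);
	print_symbol(KERN_WARNING "new: %s\n", new->ip);
	BUG();				/* recursive locking is a bug */
}

static void enqueue_waiter(struct list_head *queue, struct waiter *w,
			   struct waiter *existing)
{
	if (existing)
		waiter_conflict(existing, w);
	if (w->flags & WAITER_PRIO)
		list_add(&w->list, queue);	/* head: granted next */
	else
		list_add_tail(&w->list, queue);	/* tail: strict FIFO */
}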
/**
/* Can't use hlist_for_each_entry - don't want prefetch here */
if (hlist_empty(head))
goto out;
- has_entries = 1;
gl = list_entry(head->first, struct gfs2_glock, gl_list);
while(1) {
if (gl->gl_sbd == sdp) {
gfs2_glock_put(prev);
prev = gl;
examiner(gl);
+ has_entries = 1;
read_lock(gl_lock_addr(hash));
}
if (gl->gl_list.next == NULL)
for (;;) {
cont = 0;
for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		if (examine_bucket(clear_glock, sdp, x))
cont = 1;
}
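/*
 * A sketch of the shutdown scan above, with illustrative names: each
 * pass walks every hash bucket, and the examiner's has_entries result
 * (now set only when the examiner actually ran, per the hunk above)
 * decides whether another pass is needed before the loop can stop.
 */
static void clear_all(struct hlist_head *buckets, unsigned int nbuckets,
		      int (*scan)(struct hlist_head *head))
{
	unsigned int x;
	int cont;

	for (;;) {
		cont = 0;
		for (x = 0; x < nbuckets; x++)
			if (scan(&buckets[x]))	/* bucket still had work */
				cont = 1;
		if (!cont)		/* every bucket came up empty */
			break;
		msleep(10);		/* let pending work complete */
	}
}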
printk(" %u", x);
}
printk(" \n");
- printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
+ printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref));
printk(KERN_INFO " gl_state = %u\n", gl->gl_state);
printk(KERN_INFO " gl_owner = %s\n", gl->gl_owner->comm);
print_symbol(KERN_INFO " gl_ip = %s\n", gl->gl_ip);
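/*
 * A closing sketch: with a bare atomic_t the dump reads the count
 * directly; the old line had to reach into kref's internal
 * ->refcount field, which kref never promised as a stable interface.
 */
static void obj_dump(struct obj *o)
{
	printk(KERN_INFO "  ref = %d\n", atomic_read(&o->ref));
}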