/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"
enum dlm_mle_type {
        DLM_MLE_BLOCK,
        DLM_MLE_MASTER,
        DLM_MLE_MIGRATION
};

struct dlm_lock_name
{
        u8 len;
        u8 name[DLM_LOCKID_NAME_MAX];
};

struct dlm_master_list_entry
{
        struct list_head list;
        struct list_head hb_events;
        struct dlm_ctxt *dlm;
        spinlock_t spinlock;
        wait_queue_head_t wq;
        atomic_t woken;
        struct kref mle_refs;
        int inuse;
        unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        u8 master;
        u8 new_master;
        enum dlm_mle_type type;
        struct o2hb_callback_func mle_hb_up;
        struct o2hb_callback_func mle_hb_down;
        union {
                struct dlm_lock_resource *res;
                struct dlm_lock_name name;
        } u;
};
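
/*
 * An mle is keyed differently depending on its type: BLOCK and
 * MIGRATION entries carry the lock name inline in u.name, while a
 * MASTER entry points at the lock resource itself via u.res (see
 * dlm_mle_equal() below).  A rough sketch of resolving the name for
 * either form, assuming only the fields above:
 *
 *	if (mle->type == DLM_MLE_MASTER)
 *		name = mle->u.res->lockname.name;
 *	else
 *		name = mle->u.name.name;
 */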
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
                              struct dlm_master_list_entry *mle,
                              struct o2nm_node *node,
                              int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
                            struct dlm_master_list_entry *mle,
                            struct o2nm_node *node,
                            int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
                                struct dlm_lock_resource *res,
                                void *nodemap, u32 flags);
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
                                struct dlm_master_list_entry *mle,
                                const char *name,
                                unsigned int namelen)
{
        struct dlm_lock_resource *res;

        if (dlm != mle->dlm)
                return 0;

        if (mle->type == DLM_MLE_BLOCK ||
            mle->type == DLM_MLE_MIGRATION) {
                if (namelen != mle->u.name.len ||
                    memcmp(name, mle->u.name.name, namelen) != 0)
                        return 0;
        } else {
                res = mle->u.res;
                if (namelen != res->lockname.len ||
                    memcmp(res->lockname.name, name, namelen) != 0)
                        return 0;
        }
        return 1;
}
#define dlm_print_nodemap(m)  _dlm_print_nodemap(m,#m)
static void _dlm_print_nodemap(unsigned long *map, const char *mapname)
{
        int i;
        printk("%s=[ ", mapname);
        for (i=0; i<O2NM_MAX_NODES; i++)
                if (test_bit(i, map))
                        printk("%u ", i);
        printk("]");
}
static void dlm_print_one_mle(struct dlm_master_list_entry *mle)
{
        int refs;
        char *type;
        char attached;
        u8 master;
        unsigned int namelen;
        const char *name;
        struct kref *k;
        unsigned long *maybe = mle->maybe_map,
                      *vote = mle->vote_map,
                      *resp = mle->response_map,
                      *node = mle->node_map;

        k = &mle->mle_refs;
        if (mle->type == DLM_MLE_BLOCK)
                type = "BLK";
        else if (mle->type == DLM_MLE_MASTER)
                type = "MAS";
        else
                type = "MIG";
        refs = atomic_read(&k->refcount);
        master = mle->master;
        attached = (list_empty(&mle->hb_events) ? 'N' : 'Y');

        if (mle->type != DLM_MLE_MASTER) {
                namelen = mle->u.name.len;
                name = mle->u.name.name;
        } else {
                namelen = mle->u.res->lockname.len;
                name = mle->u.res->lockname.name;
        }

        mlog(ML_NOTICE, "%.*s: %3s refs=%3d mas=%3u new=%3u evt=%c inuse=%d ",
             namelen, name, type, refs, master, mle->new_master, attached,
             mle->inuse);
        dlm_print_nodemap(maybe);
        printk(", ");
        dlm_print_nodemap(vote);
        printk(", ");
        dlm_print_nodemap(resp);
        printk(", ");
        dlm_print_nodemap(node);
        printk("\n");
}
#if 0
/* Code here is included but defined out as it aids debugging */

static void dlm_dump_mles(struct dlm_ctxt *dlm)
{
        struct dlm_master_list_entry *mle;

        mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
        spin_lock(&dlm->master_lock);
        list_for_each_entry(mle, &dlm->master_list, list)
                dlm_print_one_mle(mle);
        spin_unlock(&dlm->master_lock);
}

int dlm_dump_all_mles(const char __user *data, unsigned int len)
{
        struct dlm_ctxt *dlm;

        spin_lock(&dlm_domain_lock);
        list_for_each_entry(dlm, &dlm_domains, list) {
                mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name);
                dlm_dump_mles(dlm);
        }
        spin_unlock(&dlm_domain_lock);
        return len;
}
EXPORT_SYMBOL_GPL(dlm_dump_all_mles);
#endif  /*  0  */
static struct kmem_cache *dlm_lockres_cache = NULL;
static struct kmem_cache *dlm_lockname_cache = NULL;
static struct kmem_cache *dlm_mle_cache = NULL;

static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
                        enum dlm_mle_type type,
                        struct dlm_ctxt *dlm,
                        struct dlm_lock_resource *res,
                        const char *name,
                        unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
                        struct dlm_master_list_entry **mle,
                        char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_lock_resource *res,
                                 struct dlm_master_list_entry *mle, int to);

static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_master_list_entry *mle,
                                     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
                                    struct dlm_lock_resource *res,
                                    struct dlm_master_list_entry *mle,
                                    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
                                 struct dlm_lock_resource *res,
                                 struct dlm_master_list_entry *mle,
                                 struct dlm_master_list_entry **oldmle,
                                 const char *name, unsigned int namelen,
                                 u8 new_master, u8 master);

static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
                                    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
                                       struct dlm_lock_resource *res);
int dlm_is_host_down(int errno)
{
        switch (errno) {
        case -EBADF:
        case -ECONNREFUSED:
        case -ENOTCONN:
        case -ECONNRESET:
        case -EPIPE:
        case -EHOSTDOWN:
        case -EHOSTUNREACH:
        case -ETIMEDOUT:
        case -ECONNABORTED:
        case -ENETDOWN:
        case -ENETUNREACH:
        case -ENETRESET:
        case -ESHUTDOWN:
        case -ENOPROTOOPT:
        case -EINVAL:   /* if returned from our tcp code,
                           this means there is no socket */
                return 1;
        }
        return 0;
}
/*
 * MASTER LIST FUNCTIONS
 */


/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */
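
/*
 * Sketch of the resulting lifecycle, assuming the helpers below:
 *
 *	spin_lock(&dlm->spinlock);
 *	dlm_init_mle(mle, ...);			attaches to dlm->mle_hb_events
 *	spin_unlock(&dlm->spinlock);
 *	...master answer arrives...
 *	dlm_mle_detach_hb_events(dlm, mle);
 *	dlm_put_mle(mle);			may free the mle
 */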
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
                                              struct dlm_master_list_entry *mle)
{
        assert_spin_locked(&dlm->spinlock);

        list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}


static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
                                              struct dlm_master_list_entry *mle)
{
        if (!list_empty(&mle->hb_events))
                list_del_init(&mle->hb_events);
}


static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
                                            struct dlm_master_list_entry *mle)
{
        spin_lock(&dlm->spinlock);
        __dlm_mle_detach_hb_events(dlm, mle);
        spin_unlock(&dlm->spinlock);
}
static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);
        mle->inuse++;
        kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        spin_lock(&dlm->spinlock);
        spin_lock(&dlm->master_lock);
        mle->inuse--;
        __dlm_put_mle(mle);
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);
}
/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);
        if (!atomic_read(&mle->mle_refs.refcount)) {
                /* this may or may not crash, but who cares.
                 * it's a BUG. */
                mlog(ML_ERROR, "bad mle: %p\n", mle);
                dlm_print_one_mle(mle);
                BUG();
        } else
                kref_put(&mle->mle_refs, dlm_mle_release);
}


/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        spin_lock(&dlm->spinlock);
        spin_lock(&dlm->master_lock);
        __dlm_put_mle(mle);
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);
}

static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
        kref_get(&mle->mle_refs);
}
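
/*
 * mle reference rules, roughly: dlm_get_mle() may be used by anyone
 * who already holds a ref; the "inuse" variants must be called with
 * both dlm->spinlock and dlm->master_lock held (see the asserts
 * above); dlm_put_mle() takes both locks itself, so the final put
 * must never be issued with either lock already held.
 */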
static void dlm_init_mle(struct dlm_master_list_entry *mle,
                        enum dlm_mle_type type,
                        struct dlm_ctxt *dlm,
                        struct dlm_lock_resource *res,
                        const char *name,
                        unsigned int namelen)
{
        assert_spin_locked(&dlm->spinlock);

        mle->dlm = dlm;
        mle->type = type;
        INIT_LIST_HEAD(&mle->list);
        INIT_LIST_HEAD(&mle->hb_events);
        memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
        spin_lock_init(&mle->spinlock);
        init_waitqueue_head(&mle->wq);
        atomic_set(&mle->woken, 0);
        kref_init(&mle->mle_refs);
        memset(mle->response_map, 0, sizeof(mle->response_map));
        mle->master = O2NM_MAX_NODES;
        mle->new_master = O2NM_MAX_NODES;
        mle->inuse = 0;

        if (mle->type == DLM_MLE_MASTER) {
                BUG_ON(!res);
                mle->u.res = res;
        } else if (mle->type == DLM_MLE_BLOCK) {
                BUG_ON(!name);
                memcpy(mle->u.name.name, name, namelen);
                mle->u.name.len = namelen;
        } else /* DLM_MLE_MIGRATION */ {
                BUG_ON(!name);
                memcpy(mle->u.name.name, name, namelen);
                mle->u.name.len = namelen;
        }

        /* copy off the node_map and register hb callbacks on our copy */
        memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
        memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
        clear_bit(dlm->node_num, mle->vote_map);
        clear_bit(dlm->node_num, mle->node_map);

        /* attach the mle to the domain node up/down events */
        __dlm_mle_attach_hb_events(dlm, mle);
}
/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
                        struct dlm_master_list_entry **mle,
                        char *name, unsigned int namelen)
{
        struct dlm_master_list_entry *tmpmle;

        assert_spin_locked(&dlm->master_lock);

        list_for_each_entry(tmpmle, &dlm->master_list, list) {
                if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
                        continue;
                dlm_get_mle(tmpmle);
                *mle = tmpmle;
                return 1;
        }
        return 0;
}
void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
        struct dlm_master_list_entry *mle;

        assert_spin_locked(&dlm->spinlock);

        list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
                if (node_up)
                        dlm_mle_node_up(dlm, mle, NULL, idx);
                else
                        dlm_mle_node_down(dlm, mle, NULL, idx);
        }
}

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
                              struct dlm_master_list_entry *mle,
                              struct o2nm_node *node, int idx)
{
        spin_lock(&mle->spinlock);

        if (!test_bit(idx, mle->node_map))
                mlog(0, "node %u already removed from nodemap!\n", idx);
        else
                clear_bit(idx, mle->node_map);

        spin_unlock(&mle->spinlock);
}

static void dlm_mle_node_up(struct dlm_ctxt *dlm,
                            struct dlm_master_list_entry *mle,
                            struct o2nm_node *node, int idx)
{
        spin_lock(&mle->spinlock);

        if (test_bit(idx, mle->node_map))
                mlog(0, "node %u already in node map!\n", idx);
        else
                set_bit(idx, mle->node_map);

        spin_unlock(&mle->spinlock);
}
int dlm_init_mle_cache(void)
{
        dlm_mle_cache = kmem_cache_create("o2dlm_mle",
                                          sizeof(struct dlm_master_list_entry),
                                          0, SLAB_HWCACHE_ALIGN,
                                          NULL);
        if (dlm_mle_cache == NULL)
                return -ENOMEM;
        return 0;
}

void dlm_destroy_mle_cache(void)
{
        if (dlm_mle_cache)
                kmem_cache_destroy(dlm_mle_cache);
}

static void dlm_mle_release(struct kref *kref)
{
        struct dlm_master_list_entry *mle;
        struct dlm_ctxt *dlm;

        mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
        dlm = mle->dlm;

        if (mle->type != DLM_MLE_MASTER) {
                mlog(0, "calling mle_release for %.*s, type %d\n",
                     mle->u.name.len, mle->u.name.name, mle->type);
        } else {
                mlog(0, "calling mle_release for %.*s, type %d\n",
                     mle->u.res->lockname.len,
                     mle->u.res->lockname.name, mle->type);
        }
        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);

        /* remove from list if not already */
        if (!list_empty(&mle->list))
                list_del_init(&mle->list);

        /* detach the mle from the domain node up/down events */
        __dlm_mle_detach_hb_events(dlm, mle);

        /* NOTE: kfree under spinlock here.
         * if this is bad, we can move this to a freelist. */
        kmem_cache_free(dlm_mle_cache, mle);
}
/*
 * LOCK RESOURCE FUNCTIONS
 */

int dlm_init_master_caches(void)
{
        dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
                                              sizeof(struct dlm_lock_resource),
                                              0, SLAB_HWCACHE_ALIGN, NULL);
        if (!dlm_lockres_cache)
                goto bail;

        dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
                                               DLM_LOCKID_NAME_MAX, 0,
                                               SLAB_HWCACHE_ALIGN, NULL);
        if (!dlm_lockname_cache)
                goto bail;

        return 0;
bail:
        dlm_destroy_master_caches();
        return -ENOMEM;
}

void dlm_destroy_master_caches(void)
{
        if (dlm_lockname_cache)
                kmem_cache_destroy(dlm_lockname_cache);

        if (dlm_lockres_cache)
                kmem_cache_destroy(dlm_lockres_cache);
}
static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
                                  struct dlm_lock_resource *res,
                                  u8 owner)
{
        assert_spin_locked(&res->spinlock);

        mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);

        if (owner == dlm->node_num)
                atomic_inc(&dlm->local_resources);
        else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
                atomic_inc(&dlm->unknown_resources);
        else
                atomic_inc(&dlm->remote_resources);

        res->owner = owner;
}

void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
                              struct dlm_lock_resource *res, u8 owner)
{
        assert_spin_locked(&res->spinlock);

        if (owner == res->owner)
                return;

        if (res->owner == dlm->node_num)
                atomic_dec(&dlm->local_resources);
        else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
                atomic_dec(&dlm->unknown_resources);
        else
                atomic_dec(&dlm->remote_resources);

        dlm_set_lockres_owner(dlm, res, owner);
}
static void dlm_lockres_release(struct kref *kref)
{
        struct dlm_lock_resource *res;

        res = container_of(kref, struct dlm_lock_resource, refs);

        /* This should not happen -- all lockres' have a name
         * associated with them at init time. */
        BUG_ON(!res->lockname.name);

        mlog(0, "destroying lockres %.*s\n", res->lockname.len,
             res->lockname.name);

        if (!hlist_unhashed(&res->hash_node) ||
            !list_empty(&res->granted) ||
            !list_empty(&res->converting) ||
            !list_empty(&res->blocked) ||
            !list_empty(&res->dirty) ||
            !list_empty(&res->recovering) ||
            !list_empty(&res->purge)) {
                mlog(ML_ERROR,
                     "Going to BUG for resource %.*s."
                     " We're on a list! [%c%c%c%c%c%c%c]\n",
                     res->lockname.len, res->lockname.name,
                     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
                     !list_empty(&res->granted) ? 'G' : ' ',
                     !list_empty(&res->converting) ? 'C' : ' ',
                     !list_empty(&res->blocked) ? 'B' : ' ',
                     !list_empty(&res->dirty) ? 'D' : ' ',
                     !list_empty(&res->recovering) ? 'R' : ' ',
                     !list_empty(&res->purge) ? 'P' : ' ');

                dlm_print_one_lock_resource(res);
        }

        /* By the time we're ready to blow this guy away, we shouldn't
         * be on any lists. */
        BUG_ON(!hlist_unhashed(&res->hash_node));
        BUG_ON(!list_empty(&res->granted));
        BUG_ON(!list_empty(&res->converting));
        BUG_ON(!list_empty(&res->blocked));
        BUG_ON(!list_empty(&res->dirty));
        BUG_ON(!list_empty(&res->recovering));
        BUG_ON(!list_empty(&res->purge));

        kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

        kmem_cache_free(dlm_lockres_cache, res);
}

void dlm_lockres_put(struct dlm_lock_resource *res)
{
        kref_put(&res->refs, dlm_lockres_release);
}
static void dlm_init_lockres(struct dlm_ctxt *dlm,
                             struct dlm_lock_resource *res,
                             const char *name, unsigned int namelen)
{
        char *qname;

        /* If we memset here, we lose our reference to the kmalloc'd
         * res->lockname.name, so be sure to init every field
         * correctly! */

        qname = (char *) res->lockname.name;
        memcpy(qname, name, namelen);

        res->lockname.len = namelen;
        res->lockname.hash = dlm_lockid_hash(name, namelen);

        init_waitqueue_head(&res->wq);
        spin_lock_init(&res->spinlock);
        INIT_HLIST_NODE(&res->hash_node);
        INIT_LIST_HEAD(&res->granted);
        INIT_LIST_HEAD(&res->converting);
        INIT_LIST_HEAD(&res->blocked);
        INIT_LIST_HEAD(&res->dirty);
        INIT_LIST_HEAD(&res->recovering);
        INIT_LIST_HEAD(&res->purge);
        atomic_set(&res->asts_reserved, 0);
        res->migration_pending = 0;
        res->inflight_locks = 0;

        kref_init(&res->refs);

        /* just for consistency */
        spin_lock(&res->spinlock);
        dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
        spin_unlock(&res->spinlock);

        res->state = DLM_LOCK_RES_IN_PROGRESS;

        res->last_used = 0;

        memset(res->lvb, 0, DLM_LVB_LEN);
        memset(res->refmap, 0, sizeof(res->refmap));
}
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
                                          const char *name,
                                          unsigned int namelen)
{
        struct dlm_lock_resource *res = NULL;

        res = (struct dlm_lock_resource *)
                kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
        if (!res)
                goto error;

        res->lockname.name = (char *)
                kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
        if (!res->lockname.name)
                goto error;

        dlm_init_lockres(dlm, res, name, namelen);
        return res;

error:
        if (res && res->lockname.name)
                kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

        if (res)
                kmem_cache_free(dlm_lockres_cache, res);
        return NULL;
}
void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     int new_lockres,
                                     const char *file,
                                     int line)
{
        if (!new_lockres)
                assert_spin_locked(&res->spinlock);

        if (!test_bit(dlm->node_num, res->refmap)) {
                BUG_ON(res->inflight_locks != 0);
                dlm_lockres_set_refmap_bit(dlm->node_num, res);
        }
        res->inflight_locks++;
        mlog(0, "%s:%.*s: inflight++: now %u\n",
             dlm->name, res->lockname.len, res->lockname.name,
             res->inflight_locks);
}

void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     const char *file,
                                     int line)
{
        assert_spin_locked(&res->spinlock);

        BUG_ON(res->inflight_locks == 0);
        res->inflight_locks--;
        mlog(0, "%s:%.*s: inflight--: now %u\n",
             dlm->name, res->lockname.len, res->lockname.name,
             res->inflight_locks);
        if (res->inflight_locks == 0)
                dlm_lockres_clear_refmap_bit(dlm->node_num, res);
        wake_up(&res->wq);
}
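
/*
 * Inflight reference rules, roughly: every new local use of a lockres
 * bumps inflight_locks, and the first use also sets this node's bit in
 * the refmap.  When the count drops back to zero the bit is cleared
 * again, which is what makes the lockres purgeable.  Sketch of a
 * caller, assuming res->spinlock conventions above:
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_lockres_grab_inflight_ref(dlm, res, ...);
 *	...use the lockres...
 *	__dlm_lockres_drop_inflight_ref(dlm, res, ...);
 *	spin_unlock(&res->spinlock);
 */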
/*
 * lookup a lock resource by name.
 * may already exist in the hashtable.
 * lockid is null terminated
 *
 * if not, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm->master_list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here.   need to wait around for that node
 * to assert_master (or die).
 *
 */
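
/*
 * Typical use (a sketch, not a new entry point): a lock request first
 * resolves its name to a mastered lockres before queueing, roughly:
 *
 *	res = dlm_get_lock_resource(dlm, name, namelen, flags);
 *	if (!res)
 *		return DLM_IVLOCKID;
 *	...queue the lock against res, dlm_lockres_put(res) when done...
 */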
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
                                                 const char *lockid,
                                                 int namelen,
                                                 int flags)
{
        struct dlm_lock_resource *tmpres=NULL, *res=NULL;
        struct dlm_master_list_entry *mle = NULL;
        struct dlm_master_list_entry *alloc_mle = NULL;
        int blocked = 0;
        int ret, nodenum;
        struct dlm_node_iter iter;
        unsigned int hash;
        int tries = 0;
        int bit, wait_on_recovery = 0;
        int drop_inflight_if_nonlocal = 0;

        BUG_ON(!lockid);

        hash = dlm_lockid_hash(lockid, namelen);

        mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
        spin_lock(&dlm->spinlock);
        tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
        if (tmpres) {
                int dropping_ref = 0;

                spin_lock(&tmpres->spinlock);
                if (tmpres->owner == dlm->node_num) {
                        BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
                        dlm_lockres_grab_inflight_ref(dlm, tmpres);
                } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
                        dropping_ref = 1;
                spin_unlock(&tmpres->spinlock);
                spin_unlock(&dlm->spinlock);

                /* wait until done messaging the master, drop our ref to allow
                 * the lockres to be purged, start over. */
                if (dropping_ref) {
                        spin_lock(&tmpres->spinlock);
                        __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
                        spin_unlock(&tmpres->spinlock);
                        dlm_lockres_put(tmpres);
                        tmpres = NULL;
                        goto lookup;
                }

                mlog(0, "found in hash!\n");
                if (res)
                        dlm_lockres_put(res);
                res = tmpres;
                goto leave;
        }

        if (!res) {
                spin_unlock(&dlm->spinlock);
                mlog(0, "allocating a new resource\n");
                /* nothing found and we need to allocate one. */
                alloc_mle = (struct dlm_master_list_entry *)
                        kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
                if (!alloc_mle)
                        goto leave;
                res = dlm_new_lockres(dlm, lockid, namelen);
                if (!res)
                        goto leave;
                goto lookup;
        }

        mlog(0, "no lockres found, allocated our own: %p\n", res);
        if (flags & LKM_LOCAL) {
                /* caller knows it's safe to assume it's not mastered elsewhere
                 * DONE! return right away */
                spin_lock(&res->spinlock);
                dlm_change_lockres_owner(dlm, res, dlm->node_num);
                __dlm_insert_lockres(dlm, res);
                dlm_lockres_grab_inflight_ref(dlm, res);
                spin_unlock(&res->spinlock);
                spin_unlock(&dlm->spinlock);
                /* lockres still marked IN_PROGRESS */
                goto wake_waiters;
        }
        /* check master list to see if another node has started mastering it */
        spin_lock(&dlm->master_lock);

        /* if we found a block, wait for lock to be mastered by another node */
        blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
        if (blocked) {
                int mig;
                if (mle->type == DLM_MLE_MASTER) {
                        mlog(ML_ERROR, "master entry for nonexistent lock!\n");
                        BUG();
                }
                mig = (mle->type == DLM_MLE_MIGRATION);
                /* if there is a migration in progress, let the migration
                 * finish before continuing.  we can wait for the absence
                 * of the MIGRATION mle: either the migrate finished or
                 * one of the nodes died and the mle was cleaned up.
                 * if there is a BLOCK here, but it already has a master
                 * set, we are too late.  the master does not have a ref
                 * for us in the refmap.  detach the mle and drop it.
                 * either way, go back to the top and start over. */
                if (mig || mle->master != O2NM_MAX_NODES) {
                        BUG_ON(mig && mle->master == dlm->node_num);
                        /* we arrived too late.  the master does not
                         * have a ref for us. retry. */
                        mlog(0, "%s:%.*s: late on %s\n",
                             dlm->name, namelen, lockid,
                             mig ?  "MIGRATION" : "BLOCK");
                        spin_unlock(&dlm->master_lock);
                        spin_unlock(&dlm->spinlock);

                        /* master is known, detach */
                        if (!mig)
                                dlm_mle_detach_hb_events(dlm, mle);
                        dlm_put_mle(mle);
                        mle = NULL;

                        /* this is lame, but we can't wait on either
                         * the mle or lockres waitqueue here */
                        if (mig)
                                msleep(100);
                        goto lookup;
                }
        } else {
                /* go ahead and try to master lock on this node */
                mle = alloc_mle;
                /* make sure this does not get freed below */
                alloc_mle = NULL;
                dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
                set_bit(dlm->node_num, mle->maybe_map);
                list_add(&mle->list, &dlm->master_list);

                /* still holding the dlm spinlock, check the recovery map
                 * to see if there are any nodes that still need to be
                 * considered.  these will not appear in the mle nodemap
                 * but they might own this lockres.  wait on them. */
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit < O2NM_MAX_NODES) {
                        mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
                             "recover before lock mastery can begin\n",
                             dlm->name, namelen, (char *)lockid, bit);
                        wait_on_recovery = 1;
                }
        }
        /* at this point there is either a DLM_MLE_BLOCK or a
         * DLM_MLE_MASTER on the master list, so it's safe to add the
         * lockres to the hashtable.  anyone who finds the lock will
         * still have to wait on the IN_PROGRESS. */

        /* finally add the lockres to its hash bucket */
        __dlm_insert_lockres(dlm, res);
        /* since this lockres is new it does not require the spinlock */
        dlm_lockres_grab_inflight_ref_new(dlm, res);

        /* if this node does not become the master make sure to drop
         * this inflight reference below */
        drop_inflight_if_nonlocal = 1;

        /* get an extra ref on the mle in case this is a BLOCK
         * if so, the creator of the BLOCK may try to put the last
         * ref at this time in the assert master handler, so we
         * need an extra one to keep from a bad ptr deref. */
        dlm_get_mle_inuse(mle);
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);
redo_request:
        while (wait_on_recovery) {
                /* any cluster changes that occurred after dropping the
                 * dlm spinlock would be detectable by a change on the mle,
                 * so we only need to clear out the recovery map once. */
                if (dlm_is_recovery_lock(lockid, namelen)) {
                        mlog(ML_NOTICE, "%s: recovery map is not empty, but "
                             "must master $RECOVERY lock now\n", dlm->name);
                        if (!dlm_pre_master_reco_lockres(dlm, res))
                                wait_on_recovery = 0;
                        else {
                                mlog(0, "%s: waiting 500ms for heartbeat state "
                                     "change\n", dlm->name);
                                msleep(500);
                        }
                        continue;
                }

                dlm_kick_recovery_thread(dlm);
                msleep(1000);
                dlm_wait_for_recovery(dlm);

                spin_lock(&dlm->spinlock);
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit < O2NM_MAX_NODES) {
                        mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
                             "recover before lock mastery can begin\n",
                             dlm->name, namelen, (char *)lockid, bit);
                        wait_on_recovery = 1;
                } else
                        wait_on_recovery = 0;
                spin_unlock(&dlm->spinlock);

                if (wait_on_recovery)
                        dlm_wait_for_node_recovery(dlm, bit, 10000);
        }
        /* must wait for lock to be mastered elsewhere */
        if (blocked)
                goto wait;

        ret = -EINVAL;
        dlm_node_iter_init(mle->vote_map, &iter);
        while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
                ret = dlm_do_master_request(res, mle, nodenum);
                if (ret < 0)
                        mlog_errno(ret);
                if (mle->master != O2NM_MAX_NODES) {
                        /* found a master ! */
                        if (mle->master <= nodenum)
                                break;
                        /* if our master request has not reached the master
                         * yet, keep going until it does.  this is how the
                         * master will know that asserts are needed back to
                         * the lower nodes. */
                        mlog(0, "%s:%.*s: requests only up to %u but master "
                             "is %u, keep going\n", dlm->name, namelen,
                             lockid, nodenum, mle->master);
                }
        }

wait:
        /* keep going until the response map includes all nodes */
        ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
        if (ret < 0) {
                wait_on_recovery = 1;
                mlog(0, "%s:%.*s: node map changed, redo the "
                     "master request now, blocked=%d\n",
                     dlm->name, res->lockname.len,
                     res->lockname.name, blocked);
                if (++tries > 20) {
                        mlog(ML_ERROR, "%s:%.*s: spinning on "
                             "dlm_wait_for_lock_mastery, blocked=%d\n",
                             dlm->name, res->lockname.len,
                             res->lockname.name, blocked);
                        dlm_print_one_lock_resource(res);
                        dlm_print_one_mle(mle);
                        tries = 0;
                }
                goto redo_request;
        }
        mlog(0, "lockres mastered by %u\n", res->owner);
        /* make sure we never continue without this */
        BUG_ON(res->owner == O2NM_MAX_NODES);

        /* master is known, detach if not already detached */
        dlm_mle_detach_hb_events(dlm, mle);
        dlm_put_mle(mle);
        /* put the extra ref */
        dlm_put_mle_inuse(mle);

wake_waiters:
        spin_lock(&res->spinlock);
        if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
                dlm_lockres_drop_inflight_ref(dlm, res);
        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);

leave:
        /* need to free the unused mle */
        if (alloc_mle)
                kmem_cache_free(dlm_mle_cache, alloc_mle);

        return res;
}
#define DLM_MASTERY_TIMEOUT_MS   5000
static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_master_list_entry *mle,
                                     int *blocked)
{
        u8 m;
        int ret, bit;
        int map_changed, voting_done;
        int assert, sleep;

recheck:
        ret = 0;
        assert = 0;

        /* check if another node has already become the owner */
        spin_lock(&res->spinlock);
        if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
                mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
                     res->lockname.len, res->lockname.name, res->owner);
                spin_unlock(&res->spinlock);
                /* this will cause the master to re-assert across
                 * the whole cluster, freeing up mles */
                if (res->owner != dlm->node_num) {
                        ret = dlm_do_master_request(res, mle, res->owner);
                        if (ret < 0) {
                                /* give recovery a chance to run */
                                mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
                                msleep(500);
                                goto recheck;
                        }
                }
                ret = 0;
                goto leave;
        }
        spin_unlock(&res->spinlock);

        spin_lock(&mle->spinlock);
        m = mle->master;
        map_changed = (memcmp(mle->vote_map, mle->node_map,
                              sizeof(mle->vote_map)) != 0);
        voting_done = (memcmp(mle->vote_map, mle->response_map,
                              sizeof(mle->vote_map)) == 0);

        /* restart if we hit any errors */
        if (map_changed) {
                int b;
                mlog(0, "%s: %.*s: node map changed, restarting\n",
                     dlm->name, res->lockname.len, res->lockname.name);
                ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
                b = (mle->type == DLM_MLE_BLOCK);
                if ((*blocked && !b) || (!*blocked && b)) {
                        mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
                             dlm->name, res->lockname.len, res->lockname.name,
                             *blocked, b);
                        *blocked = b;
                }
                spin_unlock(&mle->spinlock);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto leave;
                }
                mlog(0, "%s:%.*s: restart lock mastery succeeded, "
                     "rechecking now\n", dlm->name, res->lockname.len,
                     res->lockname.name);
                goto recheck;
        } else {
                if (!voting_done) {
                        mlog(0, "map not changed and voting not done "
                             "for %s:%.*s\n", dlm->name, res->lockname.len,
                             res->lockname.name);
                }
        }

        if (m != O2NM_MAX_NODES) {
                /* another node has done an assert!
                 * all done! */
                sleep = 0;
        } else {
                sleep = 1;
                /* have all nodes responded? */
                if (voting_done && !*blocked) {
                        bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
                        if (dlm->node_num <= bit) {
                                /* my node number is lowest.
                                 * now tell other nodes that I am
                                 * mastering this. */
                                mle->master = dlm->node_num;
                                /* ref was grabbed in get_lock_resource
                                 * will be dropped in dlmlock_master */
                                assert = 1;
                                sleep = 0;
                        }
                        /* if voting is done, but we have not received
                         * an assert master yet, we must sleep */
                }
        }

        spin_unlock(&mle->spinlock);

        /* sleep if we haven't finished voting yet */
        if (sleep) {
                unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

                /*
                if (atomic_read(&mle->mle_refs.refcount) < 2)
                        mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
                        atomic_read(&mle->mle_refs.refcount),
                        res->lockname.len, res->lockname.name);
                */
                atomic_set(&mle->woken, 0);
                (void)wait_event_timeout(mle->wq,
                                         (atomic_read(&mle->woken) == 1),
                                         timeo);
                if (res->owner == O2NM_MAX_NODES) {
                        mlog(0, "%s:%.*s: waiting again\n", dlm->name,
                             res->lockname.len, res->lockname.name);
                        goto recheck;
                }
                mlog(0, "done waiting, master is %u\n", res->owner);
                ret = 0;
                goto leave;
        }

        ret = 0;   /* done */
        if (assert) {
                m = dlm->node_num;
                mlog(0, "about to master %.*s here, this=%u\n",
                     res->lockname.len, res->lockname.name, m);
                ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
                if (ret) {
                        /* This is a failure in the network path,
                         * not in the response to the assert_master
                         * (any nonzero response is a BUG on this node).
                         * Most likely a socket just got disconnected
                         * due to node death. */
                        mlog_errno(ret);
                }
                /* no longer need to restart lock mastery.
                 * all living nodes have been contacted. */
                ret = 0;
        }

        /* set the lockres owner */
        spin_lock(&res->spinlock);
        /* mastery reference obtained either during
         * assert_master_handler or in get_lock_resource */
        dlm_change_lockres_owner(dlm, res, m);
        spin_unlock(&res->spinlock);

leave:
        return ret;
}
struct dlm_bitmap_diff_iter
{
        int curnode;
        unsigned long *orig_bm;
        unsigned long *cur_bm;
        unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
        NODE_DOWN = -1,
        NODE_NO_CHANGE = 0,
        NODE_UP
};

static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
                                      unsigned long *orig_bm,
                                      unsigned long *cur_bm)
{
        unsigned long p1, p2;
        int i;

        iter->curnode = -1;
        iter->orig_bm = orig_bm;
        iter->cur_bm = cur_bm;

        for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
                p1 = *(iter->orig_bm + i);
                p2 = *(iter->cur_bm + i);
                iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
        }
}

static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
                                     enum dlm_node_state_change *state)
{
        int bit;

        if (iter->curnode >= O2NM_MAX_NODES)
                return -ENOENT;

        bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
                            iter->curnode+1);
        if (bit >= O2NM_MAX_NODES) {
                iter->curnode = O2NM_MAX_NODES;
                return -ENOENT;
        }

        /* if it was there in the original then this node died */
        if (test_bit(bit, iter->orig_bm))
                *state = NODE_DOWN;
        else
                *state = NODE_UP;

        iter->curnode = bit;
        return bit;
}
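
/*
 * For example: with bits {0,1,2} set in orig_bm and bits {0,2,3} set
 * in cur_bm, diff_bm holds bits 1 and 3; the iterator then reports
 * node 1 as NODE_DOWN (it was set in the original map) and node 3 as
 * NODE_UP.
 */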
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
                                    struct dlm_lock_resource *res,
                                    struct dlm_master_list_entry *mle,
                                    int blocked)
{
        struct dlm_bitmap_diff_iter bdi;
        enum dlm_node_state_change sc;
        int node;
        int ret = 0;

        mlog(0, "something happened such that the "
             "master process may need to be restarted!\n");

        assert_spin_locked(&mle->spinlock);

        dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
        node = dlm_bitmap_diff_iter_next(&bdi, &sc);
        while (node >= 0) {
                if (sc == NODE_UP) {
                        /* a node came up.  clear any old vote from
                         * the response map and set it in the vote map
                         * then restart the mastery. */
                        mlog(ML_NOTICE, "node %d up while restarting\n", node);

                        /* redo the master request, but only for the new node */
                        mlog(0, "sending request to new node\n");
                        clear_bit(node, mle->response_map);
                        set_bit(node, mle->vote_map);
                } else {
                        mlog(ML_ERROR, "node down! %d\n", node);
                        if (blocked) {
                                int lowest = find_next_bit(mle->maybe_map,
                                                       O2NM_MAX_NODES, 0);

                                /* act like it was never there */
                                clear_bit(node, mle->maybe_map);

                                if (node == lowest) {
                                        mlog(0, "expected master %u died"
                                            " while this node was blocked "
                                            "waiting on it!\n", node);
                                        lowest = find_next_bit(mle->maybe_map,
                                                        O2NM_MAX_NODES,
                                                        lowest+1);
                                        if (lowest < O2NM_MAX_NODES) {
                                                mlog(0, "%s:%.*s:still "
                                                     "blocked. waiting on %u "
                                                     "now\n", dlm->name,
                                                     res->lockname.len,
                                                     res->lockname.name,
                                                     lowest);
                                        } else {
                                                /* mle is an MLE_BLOCK, but
                                                 * there is now nothing left to
                                                 * block on.  we need to return
                                                 * all the way back out and try
                                                 * again with an MLE_MASTER.
                                                 * dlm_do_local_recovery_cleanup
                                                 * has already run, so the mle
                                                 * refcount is ok */
                                                mlog(0, "%s:%.*s: no "
                                                     "longer blocking. try to "
                                                     "master this here\n",
                                                     dlm->name,
                                                     res->lockname.len,
                                                     res->lockname.name);
                                                mle->type = DLM_MLE_MASTER;
                                                mle->u.res = res;
                                        }
                                }
                        }

                        /* now blank out everything, as if we had never
                         * contacted anyone */
                        memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
                        memset(mle->response_map, 0, sizeof(mle->response_map));
                        /* reset the vote_map to the current node_map */
                        memcpy(mle->vote_map, mle->node_map,
                               sizeof(mle->node_map));
                        /* put myself into the maybe map */
                        if (mle->type != DLM_MLE_BLOCK)
                                set_bit(dlm->node_num, mle->maybe_map);
                }
                ret = -EAGAIN;
                node = dlm_bitmap_diff_iter_next(&bdi, &sc);
        }
        return ret;
}
/*
 * DLM_MASTER_REQUEST_MSG
 *
 * returns: 0 on success,
 *          -errno on a network error
 *
 * on error, the caller should assume the target node is "dead"
 *
 */
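
/*
 * Response handling summary (per the switch below): YES and NO both
 * mark the target node as having responded; MAYBE also records it in
 * maybe_map as a possible contender; ERROR causes the request to be
 * resent after a short delay.
 */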
static int dlm_do_master_request(struct dlm_lock_resource *res,
                                 struct dlm_master_list_entry *mle, int to)
{
        struct dlm_ctxt *dlm = mle->dlm;
        struct dlm_master_request request;
        int ret, response=0, resend;

        memset(&request, 0, sizeof(request));
        request.node_idx = dlm->node_num;

        BUG_ON(mle->type == DLM_MLE_MIGRATION);

        if (mle->type != DLM_MLE_MASTER) {
                request.namelen = mle->u.name.len;
                memcpy(request.name, mle->u.name.name, request.namelen);
        } else {
                request.namelen = mle->u.res->lockname.len;
                memcpy(request.name, mle->u.res->lockname.name,
                       request.namelen);
        }

again:
        ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
                                 sizeof(request), to, &response);
        if (ret < 0)  {
                if (ret == -ESRCH) {
                        /* should never happen */
                        mlog(ML_ERROR, "TCP stack not ready!\n");
                        BUG();
                } else if (ret == -EINVAL) {
                        mlog(ML_ERROR, "bad args passed to o2net!\n");
                        BUG();
                } else if (ret == -ENOMEM) {
                        mlog(ML_ERROR, "out of memory while trying to send "
                             "network message!  retrying\n");
                        /* this is totally crude */
                        msleep(50);
                        goto again;
                } else if (!dlm_is_host_down(ret)) {
                        /* not a network error. bad. */
                        mlog_errno(ret);
                        mlog(ML_ERROR, "unhandled error!");
                        BUG();
                }
                /* all other errors should be network errors,
                 * and likely indicate node death */
                mlog(ML_ERROR, "link to %d went down!\n", to);
                goto out;
        }

        ret = 0;
        resend = 0;
        spin_lock(&mle->spinlock);
        switch (response) {
                case DLM_MASTER_RESP_YES:
                        set_bit(to, mle->response_map);
                        mlog(0, "node %u is the master, response=YES\n", to);
                        mlog(0, "%s:%.*s: master node %u now knows I have a "
                             "reference\n", dlm->name, res->lockname.len,
                             res->lockname.name, to);
                        mle->master = to;
                        break;
                case DLM_MASTER_RESP_NO:
                        mlog(0, "node %u not master, response=NO\n", to);
                        set_bit(to, mle->response_map);
                        break;
                case DLM_MASTER_RESP_MAYBE:
                        mlog(0, "node %u not master, response=MAYBE\n", to);
                        set_bit(to, mle->response_map);
                        set_bit(to, mle->maybe_map);
                        break;
                case DLM_MASTER_RESP_ERROR:
                        mlog(0, "node %u hit an error, resending\n", to);
                        resend = 1;
                        break;
                default:
                        mlog(ML_ERROR, "bad response! %u\n", response);
                        BUG();
        }
        spin_unlock(&mle->spinlock);
        if (resend) {
                /* this is also totally crude */
                msleep(50);
                goto again;
        }

out:
        return ret;
}
/*
 * locks that can be taken here:
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
                               void **ret_data)
{
        u8 response = DLM_MASTER_RESP_MAYBE;
        struct dlm_ctxt *dlm = data;
        struct dlm_lock_resource *res = NULL;
        struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
        struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
        char *name;
        unsigned int namelen, hash;
        int found, ret;
        int set_maybe;
        int dispatch_assert = 0;

        if (!dlm_grab(dlm))
                return DLM_MASTER_RESP_NO;

        if (!dlm_domain_fully_joined(dlm)) {
                response = DLM_MASTER_RESP_NO;
                goto send_response;
        }

        name = request->name;
        namelen = request->namelen;
        hash = dlm_lockid_hash(name, namelen);

        if (namelen > DLM_LOCKID_NAME_MAX) {
                response = DLM_IVBUFLEN;
                goto send_response;
        }

way_up_top:
        spin_lock(&dlm->spinlock);
        res = __dlm_lookup_lockres(dlm, name, namelen, hash);
        if (res) {
                spin_unlock(&dlm->spinlock);

                /* take care of the easy cases up front */
                spin_lock(&res->spinlock);
                if (res->state & (DLM_LOCK_RES_RECOVERING|
                                  DLM_LOCK_RES_MIGRATING)) {
                        spin_unlock(&res->spinlock);
                        mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
                             "being recovered/migrated\n");
                        response = DLM_MASTER_RESP_ERROR;
                        if (mle)
                                kmem_cache_free(dlm_mle_cache, mle);
                        goto send_response;
                }
                if (res->owner == dlm->node_num) {
                        mlog(0, "%s:%.*s: setting bit %u in refmap\n",
                             dlm->name, namelen, name, request->node_idx);
                        dlm_lockres_set_refmap_bit(request->node_idx, res);
                        spin_unlock(&res->spinlock);
                        response = DLM_MASTER_RESP_YES;
                        if (mle)
                                kmem_cache_free(dlm_mle_cache, mle);

                        /* this node is the owner.
                         * there is some extra work that needs to
                         * happen now.  the requesting node has
                         * caused all nodes up to this one to
                         * create mles.  this node now needs to
                         * go back and clean those up. */
                        dispatch_assert = 1;
                        goto send_response;
                } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
                        spin_unlock(&res->spinlock);
                        // mlog(0, "node %u is the master\n", res->owner);
                        response = DLM_MASTER_RESP_NO;
                        if (mle)
                                kmem_cache_free(dlm_mle_cache, mle);
                        goto send_response;
                }

                /* ok, there is no owner.  either this node is
                 * being blocked, or it is actively trying to
                 * master this lock. */
                if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
                        mlog(ML_ERROR, "lock with no owner should be "
                             "in-progress!\n");
                        BUG();
                }

                // mlog(0, "lockres is in progress...\n");
                spin_lock(&dlm->master_lock);
                found = dlm_find_mle(dlm, &tmpmle, name, namelen);
                if (!found) {
                        mlog(ML_ERROR, "no mle found for this lock!\n");
                        BUG();
                }
                set_maybe = 1;
                spin_lock(&tmpmle->spinlock);
                if (tmpmle->type == DLM_MLE_BLOCK) {
                        // mlog(0, "this node is waiting for "
                        // "lockres to be mastered\n");
                        response = DLM_MASTER_RESP_NO;
                } else if (tmpmle->type == DLM_MLE_MIGRATION) {
                        mlog(0, "node %u is master, but trying to migrate to "
                             "node %u.\n", tmpmle->master, tmpmle->new_master);
                        if (tmpmle->master == dlm->node_num) {
                                mlog(ML_ERROR, "no owner on lockres, but this "
                                     "node is trying to migrate it to %u?!\n",
                                     tmpmle->new_master);
                                BUG();
                        } else {
                                /* the real master can respond on its own */
                                response = DLM_MASTER_RESP_NO;
                        }
                } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
                        set_maybe = 0;
                        if (tmpmle->master == dlm->node_num) {
                                response = DLM_MASTER_RESP_YES;
                                /* this node will be the owner.
                                 * go back and clean the mles on any
                                 * other nodes */
                                dispatch_assert = 1;
                                dlm_lockres_set_refmap_bit(request->node_idx, res);
                                mlog(0, "%s:%.*s: setting bit %u in refmap\n",
                                     dlm->name, namelen, name,
                                     request->node_idx);
                        } else
                                response = DLM_MASTER_RESP_NO;
                } else {
                        // mlog(0, "this node is attempting to "
                        // "master lockres\n");
                        response = DLM_MASTER_RESP_MAYBE;
                }
                if (set_maybe)
                        set_bit(request->node_idx, tmpmle->maybe_map);
                spin_unlock(&tmpmle->spinlock);

                spin_unlock(&dlm->master_lock);
                spin_unlock(&res->spinlock);

                /* keep the mle attached to heartbeat events */
                dlm_put_mle(tmpmle);
                if (mle)
                        kmem_cache_free(dlm_mle_cache, mle);
                goto send_response;
        }

        /*
         * lockres doesn't exist on this node
         * if there is an MLE_BLOCK, return NO
         * if there is an MLE_MASTER, return MAYBE
         * otherwise, add an MLE_BLOCK, return NO
         */
        spin_lock(&dlm->master_lock);
        found = dlm_find_mle(dlm, &tmpmle, name, namelen);
        if (!found) {
                /* this lockid has never been seen on this node yet */
                // mlog(0, "no mle found\n");
                if (!mle) {
                        spin_unlock(&dlm->master_lock);
                        spin_unlock(&dlm->spinlock);

                        mle = (struct dlm_master_list_entry *)
                                kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
                        if (!mle) {
                                response = DLM_MASTER_RESP_ERROR;
                                mlog_errno(-ENOMEM);
                                goto send_response;
                        }
                        goto way_up_top;
                }

                // mlog(0, "this is second time thru, already allocated, "
                // "add the block.\n");
                dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
                set_bit(request->node_idx, mle->maybe_map);
                list_add(&mle->list, &dlm->master_list);
                response = DLM_MASTER_RESP_NO;
        } else {
                // mlog(0, "mle was found\n");
                set_maybe = 1;
                spin_lock(&tmpmle->spinlock);
                if (tmpmle->master == dlm->node_num) {
                        mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
                        BUG();
                }
                if (tmpmle->type == DLM_MLE_BLOCK)
                        response = DLM_MASTER_RESP_NO;
                else if (tmpmle->type == DLM_MLE_MIGRATION) {
                        mlog(0, "migration mle was found (%u->%u)\n",
                             tmpmle->master, tmpmle->new_master);
                        /* real master can respond on its own */
                        response = DLM_MASTER_RESP_NO;
                } else
                        response = DLM_MASTER_RESP_MAYBE;
                if (set_maybe)
                        set_bit(request->node_idx, tmpmle->maybe_map);
                spin_unlock(&tmpmle->spinlock);
        }
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);

        if (found) {
                /* keep the mle attached to heartbeat events */
                dlm_put_mle(tmpmle);
        }
send_response:
        /*
         * __dlm_lookup_lockres() grabbed a reference to this lockres.
         * The reference is released by dlm_assert_master_worker() under
         * the call to dlm_dispatch_assert_master().  If
         * dlm_assert_master_worker() isn't called, we drop it here.
         */
        if (dispatch_assert) {
                if (response != DLM_MASTER_RESP_YES)
                        mlog(ML_ERROR, "invalid response %d\n", response);
                if (!res) {
                        mlog(ML_ERROR, "bad lockres while trying to assert!\n");
                        BUG();
                }
                mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
                     dlm->node_num, res->lockname.len, res->lockname.name);
                ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
                                                 DLM_ASSERT_MASTER_MLE_CLEANUP);
                if (ret < 0) {
                        mlog(ML_ERROR, "failed to dispatch assert master work\n");
                        response = DLM_MASTER_RESP_ERROR;
                        dlm_lockres_put(res);
                }
        } else {
                if (res)
                        dlm_lockres_put(res);
        }

        dlm_put(dlm);
        return response;
}
/*
 * DLM_ASSERT_MASTER_MSG
 */


/*
 * NOTE: this can be used for debugging
 * can periodically run all locks owned by this node
 * and re-assert across the cluster...
 */
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
                                struct dlm_lock_resource *res,
                                void *nodemap, u32 flags)
{
        struct dlm_assert_master assert;
        int to, tmpret;
        struct dlm_node_iter iter;
        int ret = 0;
        int reassert;
        const char *lockname = res->lockname.name;
        unsigned int namelen = res->lockname.len;

        BUG_ON(namelen > O2NM_MAX_NAME_LEN);

        spin_lock(&res->spinlock);
        res->state |= DLM_LOCK_RES_SETREF_INPROG;
        spin_unlock(&res->spinlock);

again:
        reassert = 0;

        /* note that if this nodemap is empty, it returns 0 */
        dlm_node_iter_init(nodemap, &iter);
        while ((to = dlm_node_iter_next(&iter)) >= 0) {
                int r = 0;
                struct dlm_master_list_entry *mle = NULL;

                mlog(0, "sending assert master to %d (%.*s)\n", to,
                     namelen, lockname);
                memset(&assert, 0, sizeof(assert));
                assert.node_idx = dlm->node_num;
                assert.namelen = namelen;
                memcpy(assert.name, lockname, namelen);
                assert.flags = cpu_to_be32(flags);

                tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
                                            &assert, sizeof(assert), to, &r);
                if (tmpret < 0) {
                        mlog(0, "assert_master returned %d!\n", tmpret);
                        if (!dlm_is_host_down(tmpret)) {
                                mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
                                BUG();
                        }
                        /* a node died.  finish out the rest of the nodes. */
                        mlog(0, "link to %d went down!\n", to);
                        /* any nonzero status return will do */
                        ret = tmpret;
                        r = 0;
                } else if (r < 0) {
                        /* ok, something is horribly messed up.  kill thyself. */
                        mlog(ML_ERROR,"during assert master of %.*s to %u, "
                             "got %d.\n", namelen, lockname, to, r);
                        spin_lock(&dlm->spinlock);
                        spin_lock(&dlm->master_lock);
                        if (dlm_find_mle(dlm, &mle, (char *)lockname,
                                         namelen)) {
                                dlm_print_one_mle(mle);
                                __dlm_put_mle(mle);
                        }
                        spin_unlock(&dlm->master_lock);
                        spin_unlock(&dlm->spinlock);
                        BUG();
                }

                if (r & DLM_ASSERT_RESPONSE_REASSERT &&
                    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
                        mlog(ML_ERROR, "%.*s: very strange, "
                             "master MLE but no lockres on %u\n",
                             namelen, lockname, to);
                }

                if (r & DLM_ASSERT_RESPONSE_REASSERT) {
                        mlog(0, "%.*s: node %u create mles on other "
                             "nodes and requests a re-assert\n",
                             namelen, lockname, to);
                        reassert = 1;
                }
                if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
                        mlog(0, "%.*s: node %u has a reference to this "
                             "lockres, set the bit in the refmap\n",
                             namelen, lockname, to);
                        spin_lock(&res->spinlock);
                        dlm_lockres_set_refmap_bit(to, res);
                        spin_unlock(&res->spinlock);
                }
        }

        if (reassert)
                goto again;

        spin_lock(&res->spinlock);
        res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);

        return ret;
}
/*
 * locks that can be taken here:
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
                              void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_master_list_entry *mle = NULL;
        struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
        struct dlm_lock_resource *res = NULL;
        char *name;
        unsigned int namelen, hash;
        u32 flags;
        int master_request = 0, have_lockres_ref = 0;
        int ret = 0;

        if (!dlm_grab(dlm))
                return 0;

        name = assert->name;
        namelen = assert->namelen;
        hash = dlm_lockid_hash(name, namelen);
        flags = be32_to_cpu(assert->flags);

        if (namelen > DLM_LOCKID_NAME_MAX) {
                mlog(ML_ERROR, "Invalid name length!");
                goto done;
        }

        spin_lock(&dlm->spinlock);

        if (flags)
                mlog(0, "assert_master with flags: %u\n", flags);

        /* find the MLE */
        spin_lock(&dlm->master_lock);
        if (!dlm_find_mle(dlm, &mle, name, namelen)) {
                /* not an error, could be master just re-asserting */
                mlog(0, "just got an assert_master from %u, but no "
                     "MLE for it! (%.*s)\n", assert->node_idx,
                     namelen, name);
        } else {
                int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
                if (bit >= O2NM_MAX_NODES) {
                        /* not necessarily an error, though less likely.
                         * could be master just re-asserting. */
                        mlog(0, "no bits set in the maybe_map, but %u "
                             "is asserting! (%.*s)\n", assert->node_idx,
                             namelen, name);
                } else if (bit != assert->node_idx) {
                        if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
                                mlog(0, "master %u was found, %u should "
                                     "back off\n", assert->node_idx, bit);
                        } else {
                                /* with the fix for bug 569, a higher node
                                 * number winning the mastery will respond
                                 * YES to mastery requests, but this node
                                 * had no way of knowing.  let it pass. */
                                mlog(0, "%u is the lowest node, "
                                     "%u is asserting. (%.*s)  %u must "
                                     "have begun after %u won.\n", bit,
                                     assert->node_idx, namelen, name, bit,
                                     assert->node_idx);
                        }
                }
                if (mle->type == DLM_MLE_MIGRATION) {
                        if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
                                mlog(0, "%s:%.*s: got cleanup assert"
                                     " from %u for migration\n",
                                     dlm->name, namelen, name,
                                     assert->node_idx);
                        } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
                                mlog(0, "%s:%.*s: got unrelated assert"
                                     " from %u for migration, ignoring\n",
                                     dlm->name, namelen, name,
                                     assert->node_idx);
                                __dlm_put_mle(mle);
                                spin_unlock(&dlm->master_lock);
                                spin_unlock(&dlm->spinlock);
                                goto done;
                        }
                }
        }
        spin_unlock(&dlm->master_lock);
        /* ok everything checks out with the MLE
         * now check to see if there is a lockres */
        res = __dlm_lookup_lockres(dlm, name, namelen, hash);
        if (res) {
                spin_lock(&res->spinlock);
                if (res->state & DLM_LOCK_RES_RECOVERING)  {
                        mlog(ML_ERROR, "%u asserting but %.*s is "
                             "RECOVERING!\n", assert->node_idx, namelen, name);
                        goto kill;
                }
                if (!mle) {
                        if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
                            res->owner != assert->node_idx) {
                                mlog(ML_ERROR, "assert_master from "
                                     "%u, but current owner is "
                                     "%u! (%.*s)\n",
                                     assert->node_idx, res->owner,
                                     namelen, name);
                                goto kill;
                        }
                } else if (mle->type != DLM_MLE_MIGRATION) {
                        if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
                                /* owner is just re-asserting */
                                if (res->owner == assert->node_idx) {
                                        mlog(0, "owner %u re-asserting on "
                                             "lock %.*s\n", assert->node_idx,
                                             namelen, name);
                                        goto ok;
                                }
                                mlog(ML_ERROR, "got assert_master from "
                                     "node %u, but %u is the owner! "
                                     "(%.*s)\n", assert->node_idx,
                                     res->owner, namelen, name);
                                goto kill;
                        }
                        if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
                                mlog(ML_ERROR, "got assert from %u, but lock "
                                     "with no owner should be "
                                     "in-progress! (%.*s)\n",
                                     assert->node_idx,
                                     namelen, name);
                                goto kill;
                        }
                } else /* mle->type == DLM_MLE_MIGRATION */ {
                        /* should only be getting an assert from new master */
                        if (assert->node_idx != mle->new_master) {
                                mlog(ML_ERROR, "got assert from %u, but "
                                     "new master is %u, and old master "
                                     "was %u (%.*s)\n",
                                     assert->node_idx, mle->new_master,
                                     mle->master, namelen, name);
                                goto kill;
                        }
                }
ok:
                spin_unlock(&res->spinlock);
        }
        spin_unlock(&dlm->spinlock);
        // mlog(0, "woo!  got an assert_master from node %u!\n",
        //           assert->node_idx);
        if (mle) {
                int extra_ref = 0;
                int nn = -1;
                int rr, err = 0;

                spin_lock(&mle->spinlock);
                if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
                        extra_ref = 1;
                else {
                        /* MASTER mle: if any bits set in the response map
                         * then the calling node needs to re-assert to clear
                         * up nodes that this node contacted */
                        while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
                                                    nn+1)) < O2NM_MAX_NODES) {
                                if (nn != dlm->node_num && nn != assert->node_idx)
                                        master_request = 1;
                        }
                }
                mle->master = assert->node_idx;
                atomic_set(&mle->woken, 1);
                wake_up(&mle->wq);
                spin_unlock(&mle->spinlock);

                if (res) {
                        spin_lock(&res->spinlock);
                        if (mle->type == DLM_MLE_MIGRATION) {
                                mlog(0, "finishing off migration of lockres %.*s, "
                                     "from %u to %u\n",
                                     res->lockname.len, res->lockname.name,
                                     dlm->node_num, mle->new_master);
                                res->state &= ~DLM_LOCK_RES_MIGRATING;
                                dlm_change_lockres_owner(dlm, res, mle->new_master);
                                BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
                        } else {
                                dlm_change_lockres_owner(dlm, res, mle->master);
                        }
                        spin_unlock(&res->spinlock);
                        have_lockres_ref = 1;
                }

                /* master is known, detach if not already detached.
                 * ensures that only one assert_master call will happen
                 * on this mle. */
                spin_lock(&dlm->spinlock);
                spin_lock(&dlm->master_lock);

                rr = atomic_read(&mle->mle_refs.refcount);
                if (mle->inuse > 0) {
                        if (extra_ref && rr < 3)
                                err = 1;
                        else if (!extra_ref && rr < 2)
                                err = 1;
                } else {
                        if (extra_ref && rr < 2)
                                err = 1;
                        else if (!extra_ref && rr < 1)
                                err = 1;
                }
                if (err) {
                        mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
                             "that will mess up this node, refs=%d, extra=%d, "
                             "inuse=%d\n", dlm->name, namelen, name,
                             assert->node_idx, rr, extra_ref, mle->inuse);
                        dlm_print_one_mle(mle);
                }
                list_del_init(&mle->list);
                __dlm_mle_detach_hb_events(dlm, mle);
                __dlm_put_mle(mle);
                if (extra_ref) {
                        /* the assert master message now balances the extra
                         * ref given by the master / migration request message.
                         * if this is the last put, it will be removed
                         * from the list. */
                        __dlm_put_mle(mle);
                }
                spin_unlock(&dlm->master_lock);
                spin_unlock(&dlm->spinlock);
        } else if (res) {
                if (res->owner != assert->node_idx) {
                        mlog(0, "assert_master from %u, but current "
                             "owner is %u (%.*s), no mle\n", assert->node_idx,
                             res->owner, namelen, name);
                }
        }

done:
        ret = 0;
        if (res) {
                spin_lock(&res->spinlock);
                res->state |= DLM_LOCK_RES_SETREF_INPROG;
                spin_unlock(&res->spinlock);
                *ret_data = (void *)res;
        }
        dlm_put(dlm);
        if (master_request) {
                mlog(0, "need to tell master to reassert\n");
                /* positive. negative would shoot down the node. */
                ret |= DLM_ASSERT_RESPONSE_REASSERT;
                if (!have_lockres_ref) {
                        mlog(ML_ERROR, "strange, got assert from %u, MASTER "
                             "mle present here for %s:%.*s, but no lockres!\n",
                             assert->node_idx, dlm->name, namelen, name);
                }
        }
        if (have_lockres_ref) {
                /* let the master know we have a reference to the lockres */
                ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
                mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
                     dlm->name, namelen, name, assert->node_idx);
        }
        return ret;

kill:
        /* kill the caller! */
        mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
             "and killing the other node now!  This node is OK and can continue.\n");
        __dlm_print_one_lock_resource(res);
        spin_unlock(&res->spinlock);
        spin_unlock(&dlm->spinlock);
        *ret_data = (void *)res;
        dlm_put(dlm);
        return -EINVAL;
}
void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
{
        struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;

        if (ret_data) {
                spin_lock(&res->spinlock);
                res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
                spin_unlock(&res->spinlock);
                wake_up(&res->wq);
                dlm_lockres_put(res);
        }
        return;
}
int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
                               struct dlm_lock_resource *res,
                               int ignore_higher, u8 request_from, u32 flags)
{
        struct dlm_work_item *item;
        item = kzalloc(sizeof(*item), GFP_NOFS);
        if (!item)
                return -ENOMEM;


        /* queue up work for dlm_assert_master_worker */
        dlm_grab(dlm);  /* get an extra ref for the work item */
        dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
        item->u.am.lockres = res; /* already have a ref */
        /* can optionally ignore node numbers higher than this node */
        item->u.am.ignore_higher = ignore_higher;
        item->u.am.request_from = request_from;
        item->u.am.flags = flags;

        if (ignore_higher)
                mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
                     res->lockname.name);

        spin_lock(&dlm->work_lock);
        list_add_tail(&item->list, &dlm->work_list);
        spin_unlock(&dlm->work_lock);

        queue_work(dlm->dlm_worker, &dlm->dispatched_work);
        return 0;
}
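
/*
 * Rough flow: the queued item is picked up by the dlm worker thread,
 * which calls dlm_assert_master_worker() below outside of any
 * spinlocks; the extra dlm ref taken above is dropped once the worker
 * has run, and the lockres ref is dropped by the worker itself.
 */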
static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
{
        struct dlm_ctxt *dlm = data;
        int ret = 0;
        struct dlm_lock_resource *res;
        unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
        int ignore_higher;
        int bit;
        u8 request_from;
        u32 flags;

        dlm = item->dlm;
        res = item->u.am.lockres;
        ignore_higher = item->u.am.ignore_higher;
        request_from = item->u.am.request_from;
        flags = item->u.am.flags;

        spin_lock(&dlm->spinlock);
        memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
        spin_unlock(&dlm->spinlock);

        clear_bit(dlm->node_num, nodemap);
        if (ignore_higher) {
                /* if is this just to clear up mles for nodes below
                 * this node, do not send the message to the original
                 * caller or any node number higher than this */
                clear_bit(request_from, nodemap);
                bit = dlm->node_num;
                while (1) {
                        bit = find_next_bit(nodemap, O2NM_MAX_NODES,
                                            bit+1);
                        if (bit >= O2NM_MAX_NODES)
                                break;
                        clear_bit(bit, nodemap);
                }
        }

        /*
         * If we're migrating this lock to someone else, we are no
         * longer allowed to assert our own mastery.  OTOH, we need to
         * prevent migration from starting while we're still asserting
         * our dominance.  The reserved ast delays migration.
         */
        spin_lock(&res->spinlock);
        if (res->state & DLM_LOCK_RES_MIGRATING) {
                mlog(0, "Someone asked us to assert mastery, but we're "
                     "in the middle of migration.  Skipping assert, "
                     "the new master will handle that.\n");
                spin_unlock(&res->spinlock);
                goto put;
        } else
                __dlm_lockres_reserve_ast(res);
        spin_unlock(&res->spinlock);

        /* this call now finishes out the nodemap
         * even if one or more nodes die */
        mlog(0, "worker about to master %.*s here, this=%u\n",
             res->lockname.len, res->lockname.name, dlm->node_num);
        ret = dlm_do_assert_master(dlm, res, nodemap, flags);
        if (ret < 0) {
                /* no need to restart, we are done */
                if (!dlm_is_host_down(ret))
                        mlog_errno(ret);
        }

        /* Ok, we've asserted ourselves.  Let's let migration start. */
        dlm_lockres_release_ast(dlm, res);

put:
        dlm_lockres_put(res);

        mlog(0, "finished with dlm_assert_master_worker\n");
}
/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
 * We cannot wait for node recovery to complete to begin mastering this
 * lockres because this lockres is used to kick off recovery! ;-)
 * So, do a pre-check on all living nodes to see if any of those nodes
 * think that $RECOVERY is currently mastered by a dead node.  If so,
 * we wait a short time to allow that node to get notified by its own
 * heartbeat stack, then check again.  All $RECOVERY lock resources
 * mastered by dead nodes are purged when the heartbeat callback is
 * fired, so we can know for sure that it is safe to continue once
 * the node returns a live node or no node. */
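
/*
 * In short (a sketch of the decision below): for each live node, ask
 * who it thinks masters $RECOVERY; if any answer names a node still in
 * dlm->recovery_map, return nonzero so the caller waits and retries,
 * otherwise mastery may proceed.
 */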
2252 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2253 struct dlm_lock_resource *res)
2255 struct dlm_node_iter iter;
2258 u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2260 spin_lock(&dlm->spinlock);
2261 dlm_node_iter_init(dlm->domain_map, &iter);
2262 spin_unlock(&dlm->spinlock);
2264 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2265 /* do not send to self */
2266 if (nodenum == dlm->node_num)
2268 ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2271 if (!dlm_is_host_down(ret))
2273 /* host is down, so answer for that node would be
2274 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
2278 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2279 /* check to see if this master is in the recovery map */
2280 spin_lock(&dlm->spinlock);
2281 if (test_bit(master, dlm->recovery_map)) {
2282 mlog(ML_NOTICE, "%s: node %u has not seen "
2283 "node %u go down yet, and thinks the "
2284 "dead node is mastering the recovery "
2285 "lock. must wait.\n", dlm->name,
2289 spin_unlock(&dlm->spinlock);
2290 mlog(0, "%s: reco lock master is %u\n", dlm->name,
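/*
 * A minimal sketch (not compiled) of how a caller might drive the
 * pre-check above, assuming -EAGAIN means some live node still thinks a
 * dead node masters $RECOVERY. The retry delay is illustrative.
 */
#if 0
	int ret;

	do {
		ret = dlm_pre_master_reco_lockres(dlm, res);
		if (ret == -EAGAIN)
			msleep(100);	/* let heartbeat callbacks catch up */
	} while (ret == -EAGAIN);
#endif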
2299 * DLM_DEREF_LOCKRES_MSG
2302 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2304 struct dlm_deref_lockres deref;
2306 const char *lockname;
2307 unsigned int namelen;
2309 lockname = res->lockname.name;
2310 namelen = res->lockname.len;
2311 BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2313 mlog(0, "%s:%.*s: sending deref to %d\n",
2314 dlm->name, namelen, lockname, res->owner);
2315 memset(&deref, 0, sizeof(deref));
2316 deref.node_idx = dlm->node_num;
2317 deref.namelen = namelen;
2318 memcpy(deref.name, lockname, namelen);
2320 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2321 &deref, sizeof(deref), res->owner, &r);
2325 /* BAD. other node says I did not have a ref. */
2326 mlog(ML_ERROR,"while dropping ref on %s:%.*s "
2327 "(master=%u) got %d.\n", dlm->name, namelen,
2328 lockname, res->owner, r);
2329 dlm_print_one_lock_resource(res);
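/*
 * A sketch (not compiled) of the o2net status convention relied on
 * above: the return value of o2net_send_message() reports local or
 * transport failure, while the final argument receives the remote
 * handler's status.
 */
#if 0
	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
				 &deref, sizeof(deref), res->owner, &r);
	if (ret < 0)
		mlog(ML_ERROR, "message never reached node %u (%d)\n",
		     res->owner, ret);
	else if (r < 0)
		mlog(ML_ERROR, "node %u rejected the deref (%d)\n",
		     res->owner, r);
#endif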
2335 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2338 struct dlm_ctxt *dlm = data;
2339 struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2340 struct dlm_lock_resource *res = NULL;
2342 unsigned int namelen;
2346 struct dlm_work_item *item;
2354 namelen = deref->namelen;
2355 node = deref->node_idx;
2357 if (namelen > DLM_LOCKID_NAME_MAX) {
2358 mlog(ML_ERROR, "Invalid name length!\n");
2361 if (deref->node_idx >= O2NM_MAX_NODES) {
2362 mlog(ML_ERROR, "Invalid node number: %u\n", node);
2366 hash = dlm_lockid_hash(name, namelen);
2368 spin_lock(&dlm->spinlock);
2369 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2371 spin_unlock(&dlm->spinlock);
2372 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2373 dlm->name, namelen, name);
2376 spin_unlock(&dlm->spinlock);
2378 spin_lock(&res->spinlock);
2379 if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2382 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2383 if (test_bit(node, res->refmap)) {
2384 dlm_lockres_clear_refmap_bit(node, res);
2388 spin_unlock(&res->spinlock);
2392 dlm_lockres_calc_usage(dlm, res);
2394 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2395 "but it is already dropped!\n", dlm->name,
2396 res->lockname.len, res->lockname.name, node);
2397 dlm_print_one_lock_resource(res);
2403 item = kzalloc(sizeof(*item), GFP_NOFS);
2410 dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2411 item->u.dl.deref_res = res;
2412 item->u.dl.deref_node = node;
2414 spin_lock(&dlm->work_lock);
2415 list_add_tail(&item->list, &dlm->work_list);
2416 spin_unlock(&dlm->work_lock);
2418 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2423 dlm_lockres_put(res);
2429 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2431 struct dlm_ctxt *dlm;
2432 struct dlm_lock_resource *res;
2437 res = item->u.dl.deref_res;
2438 node = item->u.dl.deref_node;
2440 spin_lock(&res->spinlock);
2441 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2442 if (test_bit(node, res->refmap)) {
2443 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2444 dlm_lockres_clear_refmap_bit(node, res);
2447 spin_unlock(&res->spinlock);
2450 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2451 dlm->name, res->lockname.len, res->lockname.name, node);
2452 dlm_lockres_calc_usage(dlm, res);
2454 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2455 "but it is already dropped!\n", dlm->name,
2456 res->lockname.len, res->lockname.name, node);
2457 dlm_print_one_lock_resource(res);
2460 dlm_lockres_put(res);
2463 /* Checks whether the lockres can be migrated. Returns 0 if yes, < 0
2464 * if not. If 0, numlocks is set to the number of locks in the lockres.
2466 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2467 struct dlm_lock_resource *res,
2473 struct list_head *queue;
2474 struct dlm_lock *lock;
2476 assert_spin_locked(&res->spinlock);
2479 if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2480 mlog(0, "cannot migrate lockres with unknown owner!\n");
2484 if (res->owner != dlm->node_num) {
2485 mlog(0, "cannot migrate lockres this node doesn't own!\n");
2490 queue = &res->granted;
2491 for (i = 0; i < 3; i++) {
2492 list_for_each_entry(lock, queue, list) {
2494 if (lock->ml.node == dlm->node_num) {
2495 mlog(0, "found a lock owned by this node still "
2496 "on the %s queue! will not migrate this "
2497 "lockres\n", (i == 0 ? "granted" :
2498 (i == 1 ? "converting" :
2508 mlog(0, "migrateable lockres having %d locks\n", *numlocks);
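/*
 * A sketch (not compiled) of the three-queue walk used above and in
 * several functions below: granted, converting and blocked are adjacent
 * list_heads in struct dlm_lock_resource, so advancing the queue pointer
 * visits all three.
 */
#if 0
	struct list_head *queue = &res->granted;
	struct dlm_lock *lock;
	int i;

	for (i = 0; i < 3; i++) {
		list_for_each_entry(lock, queue, list) {
			/* examine lock->ml.node here */
		}
		queue++;	/* granted -> converting -> blocked */
	}
#endif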
2515 * DLM_MIGRATE_LOCKRES
2519 static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2520 struct dlm_lock_resource *res,
2523 struct dlm_master_list_entry *mle = NULL;
2524 struct dlm_master_list_entry *oldmle = NULL;
2525 struct dlm_migratable_lockres *mres = NULL;
2528 unsigned int namelen;
2536 name = res->lockname.name;
2537 namelen = res->lockname.len;
2539 mlog(0, "migrating %.*s to %u\n", namelen, name, target);
2542 * ensure this lockres is a proper candidate for migration
2544 spin_lock(&res->spinlock);
2545 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
2547 spin_unlock(&res->spinlock);
2550 spin_unlock(&res->spinlock);
2553 if (numlocks == 0) {
2554 mlog(0, "no locks were found on this lockres! done!\n");
2559 * preallocate up front
2560 * if this fails, abort
2564 mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2570 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
2579 * find a node to migrate the lockres to
2582 mlog(0, "picking a migration node\n");
2583 spin_lock(&dlm->spinlock);
2584 /* pick a new node */
2585 if (!test_bit(target, dlm->domain_map) ||
2586 target >= O2NM_MAX_NODES) {
2587 target = dlm_pick_migration_target(dlm, res);
2589 mlog(0, "node %u chosen for migration\n", target);
2591 if (target >= O2NM_MAX_NODES ||
2592 !test_bit(target, dlm->domain_map)) {
2593 /* target chosen is not alive */
2598 spin_unlock(&dlm->spinlock);
2602 mlog(0, "continuing with target = %u\n", target);
2605 * clear any existing master requests and
2606 * add the migration mle to the list
2608 spin_lock(&dlm->master_lock);
2609 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2610 namelen, target, dlm->node_num);
2611 spin_unlock(&dlm->master_lock);
2612 spin_unlock(&dlm->spinlock);
2614 if (ret == -EEXIST) {
2615 mlog(0, "another process is already migrating it\n");
2621 * set the MIGRATING flag and flush asts
2622 * if we fail after this we need to re-dirty the lockres
2624 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2625 mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2626 "the target went down.\n", res->lockname.len,
2627 res->lockname.name, target);
2628 spin_lock(&res->spinlock);
2629 res->state &= ~DLM_LOCK_RES_MIGRATING;
2631 spin_unlock(&res->spinlock);
2637 /* master is known, detach if not already detached */
2638 dlm_mle_detach_hb_events(dlm, oldmle);
2639 dlm_put_mle(oldmle);
2644 dlm_mle_detach_hb_events(dlm, mle);
2647 kmem_cache_free(dlm_mle_cache, mle);
2653 * at this point, we have a migration target, an mle
2654 * in the master list, and the MIGRATING flag set on
2655 * the lockres */
2658 /* now that remote nodes are spinning on the MIGRATING flag,
2659 * ensure that all assert_master work is flushed. */
2660 flush_workqueue(dlm->dlm_worker);
2662 /* get an extra reference on the mle.
2663 * otherwise the assert_master from the new
2664 * master will destroy this.
2665 * also, make sure that all callers of dlm_get_mle
2666 * take both dlm->spinlock and dlm->master_lock */
2667 spin_lock(&dlm->spinlock);
2668 spin_lock(&dlm->master_lock);
2669 dlm_get_mle_inuse(mle);
2670 spin_unlock(&dlm->master_lock);
2671 spin_unlock(&dlm->spinlock);
2673 /* notify new node and send all lock state */
2674 /* call send_one_lockres with migration flag.
2675 * this serves as notice to the target node that a
2676 * migration is starting. */
2677 ret = dlm_send_one_lockres(dlm, res, mres, target,
2678 DLM_MRES_MIGRATION);
2681 mlog(0, "migration to node %u failed with %d\n",
2683 /* migration failed, detach and clean up mle */
2684 dlm_mle_detach_hb_events(dlm, mle);
2686 dlm_put_mle_inuse(mle);
2687 spin_lock(&res->spinlock);
2688 res->state &= ~DLM_LOCK_RES_MIGRATING;
2690 spin_unlock(&res->spinlock);
2694 /* at this point, the target sends a message to all nodes
2695 * (using dlm_do_migrate_request). this node is skipped since
2696 * we had to put an mle in the list to begin the process. this
2697 * node now waits for target to do an assert master. this node
2698 * will be the last one notified, ensuring that the migration
2699 * is complete everywhere. if the target dies while this is
2700 * going on, some nodes could potentially see the target as the
2701 * master, so it is important that my recovery finds the migration
2702 * mle and sets the master to UNKNOWN. */
2705 /* wait for new node to assert master */
2707 ret = wait_event_interruptible_timeout(mle->wq,
2708 (atomic_read(&mle->woken) == 1),
2709 msecs_to_jiffies(5000));
2712 if (atomic_read(&mle->woken) == 1 ||
2713 res->owner == target)
2716 mlog(0, "%s:%.*s: timed out during migration\n",
2717 dlm->name, res->lockname.len, res->lockname.name);
2718 /* avoid hang during shutdown when migrating lockres
2719 * to a node which also goes down */
2720 if (dlm_is_node_dead(dlm, target)) {
2721 mlog(0, "%s:%.*s: expected migration "
2722 "target %u is no longer up, restarting\n",
2723 dlm->name, res->lockname.len,
2724 res->lockname.name, target);
2726 /* migration failed, detach and clean up mle */
2727 dlm_mle_detach_hb_events(dlm, mle);
2729 dlm_put_mle_inuse(mle);
2730 spin_lock(&res->spinlock);
2731 res->state &= ~DLM_LOCK_RES_MIGRATING;
2733 spin_unlock(&res->spinlock);
2737 mlog(0, "%s:%.*s: caught signal during migration\n",
2738 dlm->name, res->lockname.len, res->lockname.name);
2741 /* all done, set the owner, clear the flag */
2742 spin_lock(&res->spinlock);
2743 dlm_set_lockres_owner(dlm, res, target);
2744 res->state &= ~DLM_LOCK_RES_MIGRATING;
2745 dlm_remove_nonlocal_locks(dlm, res);
2746 spin_unlock(&res->spinlock);
2749 /* master is known, detach if not already detached */
2750 dlm_mle_detach_hb_events(dlm, mle);
2751 dlm_put_mle_inuse(mle);
2754 dlm_lockres_calc_usage(dlm, res);
2757 /* re-dirty the lockres if we failed */
2759 dlm_kick_thread(dlm, res);
2761 /* wake up waiters if the MIGRATING flag got set
2762 * but migration failed */
2768 free_page((unsigned long)mres);
2772 mlog(0, "returning %d\n", ret);
2776 #define DLM_MIGRATION_RETRY_MS 100
2778 /* Should be called only after beginning the domain leave process.
2779 * There should not be any remaining locks on nonlocal lock resources,
2780 * and there should be no local locks left on locally mastered resources.
2782 * Called with the dlm spinlock held, may drop it to do migration, but
2783 * will re-acquire before exit.
2785 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */
2786 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2789 int lock_dropped = 0;
2792 spin_lock(&res->spinlock);
2793 if (res->owner != dlm->node_num) {
2794 if (!__dlm_lockres_unused(res)) {
2795 mlog(ML_ERROR, "%s:%.*s: this node is not master, "
2796 "trying to free this but locks remain\n",
2797 dlm->name, res->lockname.len, res->lockname.name);
2799 spin_unlock(&res->spinlock);
2803 /* No need to migrate a lockres having no locks */
2804 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
2805 if (ret >= 0 && numlocks == 0) {
2806 spin_unlock(&res->spinlock);
2809 spin_unlock(&res->spinlock);
2811 /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2812 spin_unlock(&dlm->spinlock);
2815 ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
2818 if (ret == -ENOTEMPTY) {
2819 mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
2820 res->lockname.len, res->lockname.name);
2824 mlog(0, "lockres %.*s: migrate failed, "
2825 "retrying\n", res->lockname.len,
2826 res->lockname.name);
2827 msleep(DLM_MIGRATION_RETRY_MS);
2829 spin_lock(&dlm->spinlock);
2831 return lock_dropped;
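/*
 * A sketch (not compiled) of a hypothetical caller: because
 * dlm_empty_lockres() may drop and retake dlm->spinlock, a hash-bucket
 * scan must restart whenever it reports the lock was dropped. "bucket"
 * names a hash bucket head and is hypothetical here.
 */
#if 0
	struct hlist_node *iter;
	struct dlm_lock_resource *res;

	spin_lock(&dlm->spinlock);
redo_bucket:
	hlist_for_each_entry(res, iter, bucket, hash_node) {
		if (dlm_empty_lockres(dlm, res))
			goto redo_bucket;	/* the bucket may have changed */
	}
	spin_unlock(&dlm->spinlock);
#endif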
2834 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2837 spin_lock(&dlm->ast_lock);
2838 spin_lock(&lock->spinlock);
2839 ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2840 spin_unlock(&lock->spinlock);
2841 spin_unlock(&dlm->ast_lock);
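/*
 * A sketch (not compiled) of the natural pairing for the predicate
 * above, assuming dlm->ast_wq is woken as the bast lists drain.
 */
#if 0
	wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock));
#endif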
2845 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2846 struct dlm_lock_resource *res,
2850 spin_lock(&res->spinlock);
2851 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2852 spin_unlock(&res->spinlock);
2854 /* target has died, so make the caller break out of the
2855 * wait_event, but caller must recheck the domain_map */
2856 spin_lock(&dlm->spinlock);
2857 if (!test_bit(mig_target, dlm->domain_map))
2859 spin_unlock(&dlm->spinlock);
2863 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2864 struct dlm_lock_resource *res)
2867 spin_lock(&res->spinlock);
2868 ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2869 spin_unlock(&res->spinlock);
2874 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2875 struct dlm_lock_resource *res,
2880 mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2881 res->lockname.len, res->lockname.name, dlm->node_num,
2883 /* need to set MIGRATING flag on lockres. this is done by
2884 * ensuring that all asts have been flushed for this lockres. */
2885 spin_lock(&res->spinlock);
2886 BUG_ON(res->migration_pending);
2887 res->migration_pending = 1;
2888 /* strategy is to reserve an extra ast then release
2889 * it below, letting the release do all of the work */
2890 __dlm_lockres_reserve_ast(res);
2891 spin_unlock(&res->spinlock);
2893 /* now flush all the pending asts */
2894 dlm_kick_thread(dlm, res);
2895 /* before waiting on DIRTY, block processes which may
2896 * try to dirty the lockres before MIGRATING is set */
2897 spin_lock(&res->spinlock);
2898 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2899 res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2900 spin_unlock(&res->spinlock);
2901 /* now wait on any pending asts and the DIRTY state */
2902 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2903 dlm_lockres_release_ast(dlm, res);
2905 mlog(0, "about to wait on migration_wq, dirty=%s\n",
2906 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2907 /* if the extra ref we just put was the final one, this
2908 * will pass thru immediately. otherwise, we need to wait
2909 * for the last ast to finish. */
2911 ret = wait_event_interruptible_timeout(dlm->migration_wq,
2912 dlm_migration_can_proceed(dlm, res, target),
2913 msecs_to_jiffies(1000));
2915 mlog(0, "woken again: migrating? %s, dead? %s\n",
2916 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2917 test_bit(target, dlm->domain_map) ? "no":"yes");
2919 mlog(0, "all is well: migrating? %s, dead? %s\n",
2920 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2921 test_bit(target, dlm->domain_map) ? "no":"yes");
2923 if (!dlm_migration_can_proceed(dlm, res, target)) {
2924 mlog(0, "trying again...\n");
2927 /* now that we are sure the MIGRATING state is there, drop
2928 * the unneeded state which blocked threads trying to DIRTY */
2929 spin_lock(&res->spinlock);
2930 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2931 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2932 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2933 spin_unlock(&res->spinlock);
2935 /* did the target go down or die? */
2936 spin_lock(&dlm->spinlock);
2937 if (!test_bit(target, dlm->domain_map)) {
2938 mlog(ML_ERROR, "aha. migration target %u just went down\n",
2942 spin_unlock(&dlm->spinlock);
2947 * o the DLM_LOCK_RES_MIGRATING flag is set
2948 * o there are no pending asts on this lockres
2949 * o all processes trying to reserve an ast on this
2950 * lockres must wait for the MIGRATING flag to clear
2955 /* last step in the migration process.
2956 * original master calls this to free all of the dlm_lock
2957 * structures that used to be for other nodes. */
2958 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2959 struct dlm_lock_resource *res)
2961 struct list_head *queue = &res->granted;
2963 struct dlm_lock *lock, *next;
2965 assert_spin_locked(&res->spinlock);
2967 BUG_ON(res->owner == dlm->node_num);
2969 for (i=0; i<3; i++) {
2970 list_for_each_entry_safe(lock, next, queue, list) {
2971 if (lock->ml.node != dlm->node_num) {
2972 mlog(0, "putting lock for node %u\n",
2974 /* be extra careful */
2975 BUG_ON(!list_empty(&lock->ast_list));
2976 BUG_ON(!list_empty(&lock->bast_list));
2977 BUG_ON(lock->ast_pending);
2978 BUG_ON(lock->bast_pending);
2979 dlm_lockres_clear_refmap_bit(lock->ml.node, res);
2980 list_del_init(&lock->list);
2982 /* In a normal unlock, we would have added a
2983 * DLM_UNLOCK_FREE_LOCK action. Force it. */
2991 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2992 if (bit >= O2NM_MAX_NODES)
2994 /* do not clear the local node reference, if there is a
2995 * process holding this, let it drop the ref itself */
2996 if (bit != dlm->node_num) {
2997 mlog(0, "%s:%.*s: node %u had a ref to this "
2998 "migrating lockres, clearing\n", dlm->name,
2999 res->lockname.len, res->lockname.name, bit);
3000 dlm_lockres_clear_refmap_bit(bit, res);
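/*
 * A sketch (not compiled) of why the walk above uses the _safe list
 * iterator: the body unlinks and drops entries, which would leave a
 * plain list_for_each_entry() chasing freed memory. The final put is
 * illustrative.
 */
#if 0
	list_for_each_entry_safe(lock, next, queue, list) {
		if (lock->ml.node != dlm->node_num) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);	/* may free the lock */
		}
	}
#endif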
3006 /* for now this is not too intelligent. we will
3007 * need stats to make this do the right thing.
3008 * this just finds the first lock on one of the
3009 * queues and uses that node as the target. */
3010 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
3011 struct dlm_lock_resource *res)
3014 struct list_head *queue = &res->granted;
3015 struct dlm_lock *lock;
3018 assert_spin_locked(&dlm->spinlock);
3020 spin_lock(&res->spinlock);
3021 for (i=0; i<3; i++) {
3022 list_for_each_entry(lock, queue, list) {
3023 /* up to the caller to make sure this node
3024 * is alive */
3025 if (lock->ml.node != dlm->node_num) {
3026 spin_unlock(&res->spinlock);
3027 return lock->ml.node;
3032 spin_unlock(&res->spinlock);
3033 mlog(0, "have not found a suitable target yet! checking domain map\n");
3035 /* ok now we're getting desperate. pick anyone alive. */
3038 nodenum = find_next_bit(dlm->domain_map,
3039 O2NM_MAX_NODES, nodenum+1);
3040 mlog(0, "found %d in domain map\n", nodenum);
3041 if (nodenum >= O2NM_MAX_NODES)
3043 if (nodenum != dlm->node_num) {
3044 mlog(0, "picking %d\n", nodenum);
3049 mlog(0, "giving up. no master to migrate to\n");
3050 return DLM_LOCK_RES_OWNER_UNKNOWN;
3055 /* this is called by the new master once all lockres
3056 * data has been received */
3057 static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
3058 struct dlm_lock_resource *res,
3059 u8 master, u8 new_master,
3060 struct dlm_node_iter *iter)
3062 struct dlm_migrate_request migrate;
3063 int ret, status = 0;
3066 memset(&migrate, 0, sizeof(migrate));
3067 migrate.namelen = res->lockname.len;
3068 memcpy(migrate.name, res->lockname.name, migrate.namelen);
3069 migrate.new_master = new_master;
3070 migrate.master = master;
3074 /* send message to all nodes, except the master and myself */
3075 while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
3076 if (nodenum == master ||
3077 nodenum == new_master)
3080 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
3081 &migrate, sizeof(migrate), nodenum,
3085 else if (status < 0) {
3086 mlog(0, "migrate request (node %u) returned %d!\n",
3089 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
3090 /* during the migration request we short-circuited
3091 * the mastery of the lockres. make sure we have
3092 * a mastery ref for nodenum */
3093 mlog(0, "%s:%.*s: need ref for node %u\n",
3094 dlm->name, res->lockname.len, res->lockname.name,
3096 spin_lock(&res->spinlock);
3097 dlm_lockres_set_refmap_bit(nodenum, res);
3098 spin_unlock(&res->spinlock);
3105 mlog(0, "returning ret=%d\n", ret);
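/*
 * A sketch (not compiled) of the dlm_node_iter idiom used here and in
 * dlm_finish_migration() below: snapshot the domain map under
 * dlm->spinlock, then iterate the copy without holding any lock.
 */
#if 0
	struct dlm_node_iter iter;
	int nodenum;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* message nodenum here */
	}
#endif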
3110 /* if there is an existing mle for this lockres, we now know who the master is.
3111 * (the one who sent us *this* message) we can clear it up right away.
3112 * since the process that put the mle on the list still has a reference to it,
3113 * we can unhash it now, set the master and wake the process. as a result,
3114 * we will have no mle in the list to start with. now we can add an mle for
3115 * the migration and this should be the only one found for those scanning the
3116 * master list. */
3117 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3120 struct dlm_ctxt *dlm = data;
3121 struct dlm_lock_resource *res = NULL;
3122 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
3123 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3125 unsigned int namelen, hash;
3131 name = migrate->name;
3132 namelen = migrate->namelen;
3133 hash = dlm_lockid_hash(name, namelen);
3135 /* preallocate.. if this fails, abort */
3136 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
3144 /* check for pre-existing lock */
3145 spin_lock(&dlm->spinlock);
3146 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3147 spin_lock(&dlm->master_lock);
3150 spin_lock(&res->spinlock);
3151 if (res->state & DLM_LOCK_RES_RECOVERING) {
3152 /* if all is working ok, this can only mean that we got
3153 * a migrate request from a node that we now see as
3154 * dead. what can we do here? drop it to the floor? */
3155 spin_unlock(&res->spinlock);
3156 mlog(ML_ERROR, "Got a migrate request, but the "
3157 "lockres is marked as recovering!");
3158 kmem_cache_free(dlm_mle_cache, mle);
3159 ret = -EINVAL; /* need a better solution */
3162 res->state |= DLM_LOCK_RES_MIGRATING;
3163 spin_unlock(&res->spinlock);
3166 /* ignore status. only nonzero status would BUG. */
3167 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3169 migrate->new_master,
3173 spin_unlock(&dlm->master_lock);
3174 spin_unlock(&dlm->spinlock);
3177 /* master is known, detach if not already detached */
3178 dlm_mle_detach_hb_events(dlm, oldmle);
3179 dlm_put_mle(oldmle);
3183 dlm_lockres_put(res);
3189 /* must be holding dlm->spinlock and dlm->master_lock
3190 * when adding a migration mle. we can clear any other mles
3191 * in the master list because we know with certainty that
3192 * the master is "master". so we remove any old mle from
3193 * the list after setting its master field, and then add
3194 * the new migration mle. this way we can hold to the rule
3195 * of having only one mle for a given lock name at all times. */
3196 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3197 struct dlm_lock_resource *res,
3198 struct dlm_master_list_entry *mle,
3199 struct dlm_master_list_entry **oldmle,
3200 const char *name, unsigned int namelen,
3201 u8 new_master, u8 master)
3210 assert_spin_locked(&dlm->spinlock);
3211 assert_spin_locked(&dlm->master_lock);
3213 /* caller is responsible for any ref taken here on oldmle */
3214 found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3216 struct dlm_master_list_entry *tmp = *oldmle;
3217 spin_lock(&tmp->spinlock);
3218 if (tmp->type == DLM_MLE_MIGRATION) {
3219 if (master == dlm->node_num) {
3220 /* ah another process raced me to it */
3221 mlog(0, "tried to migrate %.*s, but some "
3222 "process beat me to it\n",
3226 /* bad. 2 NODES are trying to migrate! */
3227 mlog(ML_ERROR, "migration error mle: "
3228 "master=%u new_master=%u // request: "
3229 "master=%u new_master=%u // "
3231 tmp->master, tmp->new_master,
3237 /* this is essentially what assert_master does */
3238 tmp->master = master;
3239 atomic_set(&tmp->woken, 1);
3241 /* remove it from the list so that only one
3242 * mle will be found */
3243 list_del_init(&tmp->list);
3244 /* detach tmp, not mle: mle is still uninitialized here (an earlier version wrongly passed mle) */
3245 __dlm_mle_detach_hb_events(dlm, tmp);
3246 ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3247 mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3248 "telling master to get ref for cleared out mle "
3249 "during migration\n", dlm->name, namelen, name,
3250 master, new_master);
3252 spin_unlock(&tmp->spinlock);
3255 /* now add a migration mle to the tail of the list */
3256 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3257 mle->new_master = new_master;
3258 /* the new master will be sending an assert master for this.
3259 * at that point we will get the refmap reference */
3260 mle->master = master;
3261 /* do this for consistency with other mle types */
3262 set_bit(new_master, mle->maybe_map);
3263 list_add(&mle->list, &dlm->master_list);
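/*
 * A sketch (not compiled) of the locking discipline this function
 * asserts: callers take dlm->spinlock, then dlm->master_lock, exactly as
 * dlm_migrate_lockres() does above.
 */
#if 0
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
				    namelen, target, dlm->node_num);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
#endif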
3269 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3271 struct dlm_master_list_entry *mle, *next;
3272 struct dlm_lock_resource *res;
3275 mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
3277 assert_spin_locked(&dlm->spinlock);
3279 /* clean the master list */
3280 spin_lock(&dlm->master_lock);
3281 list_for_each_entry_safe(mle, next, &dlm->master_list, list) {
3282 BUG_ON(mle->type != DLM_MLE_BLOCK &&
3283 mle->type != DLM_MLE_MASTER &&
3284 mle->type != DLM_MLE_MIGRATION);
3286 /* MASTER mles are initiated locally. the waiting
3287 * process will notice the node map change
3288 * shortly. let that happen as normal. */
3289 if (mle->type == DLM_MLE_MASTER)
3293 /* BLOCK mles are initiated by other nodes.
3294 * need to clean up if the dead node would have
3295 * been the master. */
3296 if (mle->type == DLM_MLE_BLOCK) {
3299 spin_lock(&mle->spinlock);
3300 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3301 if (bit != dead_node) {
3302 mlog(0, "mle found, but dead node %u would "
3303 "not have been master\n", dead_node);
3304 spin_unlock(&mle->spinlock);
3306 /* must drop the refcount by one since the
3307 * assert_master will never arrive. this
3308 * may result in the mle being unlinked and
3309 * freed, but there may still be a process
3310 * waiting in the dlmlock path which is fine. */
3311 mlog(0, "node %u was expected master\n",
3313 atomic_set(&mle->woken, 1);
3314 spin_unlock(&mle->spinlock);
3316 /* do not need events any longer, so detach
3317 * from heartbeat */
3318 __dlm_mle_detach_hb_events(dlm, mle);
3324 /* everything else is a MIGRATION mle */
3326 /* the rule for MIGRATION mles is that the master
3327 * becomes UNKNOWN if *either* the original or
3328 * the new master dies. all UNKNOWN lockreses
3329 * are sent to whichever node becomes the recovery
3330 * master. the new master is responsible for
3331 * determining if there is still a master for
3332 * this lockres, or if he needs to take over
3333 * mastery. either way, this node should expect
3334 * another message to resolve this. */
3335 if (mle->master != dead_node &&
3336 mle->new_master != dead_node)
3339 /* if we have reached this point, this mle needs to
3340 * be removed from the list and freed. */
3342 /* remove from the list early. NOTE: unlinking
3343 * list_head while in list_for_each_safe */
3344 __dlm_mle_detach_hb_events(dlm, mle);
3345 spin_lock(&mle->spinlock);
3346 list_del_init(&mle->list);
3347 atomic_set(&mle->woken, 1);
3348 spin_unlock(&mle->spinlock);
3351 mlog(0, "%s: node %u died during migration from "
3352 "%u to %u!\n", dlm->name, dead_node,
3353 mle->master, mle->new_master);
3354 /* if there is a lockres associated with this
3355 * mle, find it and set its owner to UNKNOWN */
3356 hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len);
3357 res = __dlm_lookup_lockres(dlm, mle->u.name.name,
3358 mle->u.name.len, hash);
3360 /* unfortunately if we hit this rare case, our
3361 * lock ordering is messed. we need to drop
3362 * the master lock so that we can take the
3363 * lockres lock, meaning that we will have to
3364 * restart from the head of list. */
3365 spin_unlock(&dlm->master_lock);
3367 /* move lockres onto recovery list */
3368 spin_lock(&res->spinlock);
3369 dlm_set_lockres_owner(dlm, res,
3370 DLM_LOCK_RES_OWNER_UNKNOWN);
3371 dlm_move_lockres_to_recovery_list(dlm, res);
3372 spin_unlock(&res->spinlock);
3373 dlm_lockres_put(res);
3375 /* about to get rid of mle, detach from heartbeat */
3376 __dlm_mle_detach_hb_events(dlm, mle);
3379 spin_lock(&dlm->master_lock);
3381 spin_unlock(&dlm->master_lock);
3387 /* this may be the last reference */
3390 spin_unlock(&dlm->master_lock);
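/*
 * A sketch (not compiled) of the restart idiom the MIGRATION branch
 * above relies on: once dlm->master_lock is dropped to honor the
 * res->spinlock ordering, the walk must resume from the list head.
 * The condition name is hypothetical.
 */
#if 0
	struct dlm_master_list_entry *mle, *next;

top:
	spin_lock(&dlm->master_lock);
	list_for_each_entry_safe(mle, next, &dlm->master_list, list) {
		if (needs_lockres_lock) {	/* hypothetical */
			spin_unlock(&dlm->master_lock);
			/* take res->spinlock, fix up the lockres */
			goto top;
		}
	}
	spin_unlock(&dlm->master_lock);
#endif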
3394 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3397 struct dlm_node_iter iter;
3400 spin_lock(&dlm->spinlock);
3401 dlm_node_iter_init(dlm->domain_map, &iter);
3402 clear_bit(old_master, iter.node_map);
3403 clear_bit(dlm->node_num, iter.node_map);
3404 spin_unlock(&dlm->spinlock);
3406 /* ownership of the lockres is changing. account for the
3407 * mastery reference here since old_master will briefly have
3408 * a reference after the migration completes */
3409 spin_lock(&res->spinlock);
3410 dlm_lockres_set_refmap_bit(old_master, res);
3411 spin_unlock(&res->spinlock);
3413 mlog(0, "now time to do a migrate request to other nodes\n");
3414 ret = dlm_do_migrate_request(dlm, res, old_master,
3415 dlm->node_num, &iter);
3421 mlog(0, "doing assert master of %.*s to all except the original node\n",
3422 res->lockname.len, res->lockname.name);
3423 /* this call now finishes out the nodemap
3424 * even if one or more nodes die */
3425 ret = dlm_do_assert_master(dlm, res, iter.node_map,
3426 DLM_ASSERT_MASTER_FINISH_MIGRATION);
3428 /* no longer need to retry. all living nodes contacted. */
3433 memset(iter.node_map, 0, sizeof(iter.node_map));
3434 set_bit(old_master, iter.node_map);
3435 mlog(0, "doing assert master of %.*s back to %u\n",
3436 res->lockname.len, res->lockname.name, old_master);
3437 ret = dlm_do_assert_master(dlm, res, iter.node_map,
3438 DLM_ASSERT_MASTER_FINISH_MIGRATION);
3440 mlog(0, "assert master to original master failed "
3442 /* the only nonzero status here would be because of
3443 * a dead original node. we're done. */
3447 /* all done, set the owner, clear the flag */
3448 spin_lock(&res->spinlock);
3449 dlm_set_lockres_owner(dlm, res, dlm->node_num);
3450 res->state &= ~DLM_LOCK_RES_MIGRATING;
3451 spin_unlock(&res->spinlock);
3452 /* re-dirty it on the new master */
3453 dlm_kick_thread(dlm, res);
3460 * LOCKRES AST REFCOUNT
3461 * this is integral to migration
3464 /* for future intent to call an ast, reserve one ahead of time.
3465 * this should be called only after waiting on the lockres
3466 * with dlm_wait_on_lockres, and while still holding the
3467 * spinlock after the call. */
3468 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3470 assert_spin_locked(&res->spinlock);
3471 if (res->state & DLM_LOCK_RES_MIGRATING) {
3472 __dlm_print_one_lock_resource(res);
3474 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3476 atomic_inc(&res->asts_reserved);
3480 * used to drop the reserved ast, either because it went unused,
3481 * or because the ast/bast was actually called.
3483 * also, if there is a pending migration on this lockres,
3484 * and this was the last pending ast on the lockres,
3485 * atomically set the MIGRATING flag before we drop the lock.
3486 * this is how we ensure that migration can proceed with no
3487 * asts in progress. note that it is ok if the state of the
3488 * queues is such that a lock should be granted in the future
3489 * or that a bast should be fired, because the new master will
3490 * shuffle the lists on this lockres as soon as it is migrated.
3492 void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3493 struct dlm_lock_resource *res)
3495 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3498 if (!res->migration_pending) {
3499 spin_unlock(&res->spinlock);
3503 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3504 res->migration_pending = 0;
3505 res->state |= DLM_LOCK_RES_MIGRATING;
3506 spin_unlock(&res->spinlock);
3508 wake_up(&dlm->migration_wq);
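/*
 * A sketch (not compiled) of the reserve/release pairing formed by the
 * two functions above: any path that may fire an ast later reserves one
 * under res->spinlock, and the final release is what atomically flips a
 * pending migration into the MIGRATING state.
 */
#if 0
	spin_lock(&res->spinlock);
	__dlm_lockres_reserve_ast(res);	/* holds off MIGRATING */
	spin_unlock(&res->spinlock);

	/* deliver or queue the ast/bast here */

	dlm_lockres_release_ast(dlm, res);	/* last release may set MIGRATING */
#endif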