/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node,
			      int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node,
			    int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags);
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
				struct dlm_master_list_entry *mle,
				const char *name,
				unsigned int namelen)
{
	struct dlm_lock_resource *res;

	if (dlm != mle->dlm)
		return 0;

	if (mle->type == DLM_MLE_BLOCK ||
	    mle->type == DLM_MLE_MIGRATION) {
		if (namelen != mle->u.name.len ||
		    memcmp(name, mle->u.name.name, namelen) != 0)
			return 0;
	} else {
		res = mle->u.res;
		if (namelen != res->lockname.len ||
		    memcmp(res->lockname.name, name, namelen) != 0)
			return 0;
	}
	return 1;
}
#define dlm_print_nodemap(m) _dlm_print_nodemap(m, #m)
static void _dlm_print_nodemap(unsigned long *map, const char *mapname)
{
	int i;

	printk("%s=[ ", mapname);
	for (i = 0; i < O2NM_MAX_NODES; i++)
		if (test_bit(i, map))
			printk("%d ", i);
	printk("]");
}
static void dlm_print_one_mle(struct dlm_master_list_entry *mle)
{
	int refs;
	char *type;
	char attached;
	u8 master;
	unsigned int namelen;
	const char *name;
	struct kref *k;
	unsigned long *maybe = mle->maybe_map,
		      *vote = mle->vote_map,
		      *resp = mle->response_map,
		      *node = mle->node_map;

	k = &mle->mle_refs;
	if (mle->type == DLM_MLE_BLOCK)
		type = "BLK";
	else if (mle->type == DLM_MLE_MASTER)
		type = "MAS";
	else
		type = "MIG";
	refs = atomic_read(&k->refcount);
	master = mle->master;
	attached = (list_empty(&mle->hb_events) ? 'N' : 'Y');

	if (mle->type != DLM_MLE_MASTER) {
		namelen = mle->u.name.len;
		name = mle->u.name.name;
	} else {
		namelen = mle->u.res->lockname.len;
		name = mle->u.res->lockname.name;
	}

	mlog(ML_NOTICE, "%.*s: %3s refs=%3d mas=%3u new=%3u evt=%c inuse=%d ",
	     namelen, name, type, refs, master, mle->new_master, attached,
	     mle->inuse);
	dlm_print_nodemap(maybe);
	printk(", ");
	dlm_print_nodemap(vote);
	printk(", ");
	dlm_print_nodemap(resp);
	printk(", ");
	dlm_print_nodemap(node);
	printk("\n");
}
#if 0
/* Code here is included but defined out as it aids debugging */

static void dlm_dump_mles(struct dlm_ctxt *dlm)
{
	struct dlm_master_list_entry *mle;

	mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
	spin_lock(&dlm->master_lock);
	list_for_each_entry(mle, &dlm->master_list, list)
		dlm_print_one_mle(mle);
	spin_unlock(&dlm->master_lock);
}

int dlm_dump_all_mles(const char __user *data, unsigned int len)
{
	struct dlm_ctxt *dlm;

	spin_lock(&dlm_domain_lock);
	list_for_each_entry(dlm, &dlm_domains, list) {
		mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name);
		dlm_dump_mles(dlm);
	}
	spin_unlock(&dlm_domain_lock);
	return len;
}
EXPORT_SYMBOL_GPL(dlm_dump_all_mles);
#endif  /* 0 */
static struct kmem_cache *dlm_lockres_cache = NULL;
static struct kmem_cache *dlm_lockname_cache = NULL;
static struct kmem_cache *dlm_mle_cache = NULL;
static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to);

static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master);

static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);
int dlm_is_host_down(int errno)
{
	switch (errno) {
		case -EBADF:
		case -ECONNREFUSED:
		case -ENOTCONN:
		case -ECONNRESET:
		case -EPIPE:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ETIMEDOUT:
		case -ECONNABORTED:
		case -ENETDOWN:
		case -ENETUNREACH:
		case -ENETRESET:
		case -ESHUTDOWN:
		case -ENOPROTOOPT:
		case -EINVAL:	/* if returned from our tcp code,
				   this means there is no socket */
			return 1;
	}
	return 0;
}
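/*
 * Illustrative sketch, not part of the original file: callers in this
 * file treat any o2net error for which dlm_is_host_down() returns 1 as
 * node death (heartbeat and recovery handle the rest) and BUG() on
 * everything else.  A minimal, compiled-out example of that pattern:
 */
#if 0
static int example_classify_send_error(struct dlm_ctxt *dlm, void *msg,
				       u32 len, u8 to)
{
	int status = 0;
	int ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key,
				     msg, len, to, &status);
	if (ret < 0 && !dlm_is_host_down(ret))
		BUG();	/* not a network error: a local programming bug */
	/* on a host-down error, just assume node "to" is dead */
	return ret;
}
#endif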
/*
 * MASTER LIST FUNCTIONS
 */


/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);

	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}


static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	if (!list_empty(&mle->hb_events))
		list_del_init(&mle->hb_events);
}


static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					    struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	__dlm_mle_detach_hb_events(dlm, mle);
	spin_unlock(&dlm->spinlock);
}
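/*
 * Illustrative sketch, not part of the original file: the comment above
 * implies this lifecycle for an mle with respect to heartbeat events.
 * It is a condensed restatement of what dlm_init_mle() and
 * dlm_mle_release() below actually do:
 */
#if 0
static void example_mle_hb_lifecycle(struct dlm_ctxt *dlm,
				     struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	/* creation: attach while holding dlm->spinlock so no node
	 * up/down event can be missed */
	__dlm_mle_attach_hb_events(dlm, mle);
	spin_unlock(&dlm->spinlock);

	/* ... mastery proceeds; node changes update mle->node_map ... */

	/* once the master is known, events are useless: detach
	 * before the final kref_put that frees the mle */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle(mle);
}
#endif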
static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	mle->inuse++;
	kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	mle->inuse--;
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}
/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	if (!atomic_read(&mle->mle_refs.refcount)) {
		/* this may or may not crash, but who cares.
		 * it's a BUG. */
		mlog(ML_ERROR, "bad mle: %p\n", mle);
		dlm_print_one_mle(mle);
		BUG();
	} else
		kref_put(&mle->mle_refs, dlm_mle_release);
}
/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}


static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
	kref_get(&mle->mle_refs);
}
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen)
{
	assert_spin_locked(&dlm->spinlock);

	mle->dlm = dlm;
	mle->type = type;
	INIT_LIST_HEAD(&mle->list);
	INIT_LIST_HEAD(&mle->hb_events);
	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
	spin_lock_init(&mle->spinlock);
	init_waitqueue_head(&mle->wq);
	atomic_set(&mle->woken, 0);
	kref_init(&mle->mle_refs);
	memset(mle->response_map, 0, sizeof(mle->response_map));
	mle->master = O2NM_MAX_NODES;
	mle->new_master = O2NM_MAX_NODES;
	mle->inuse = 0;

	if (mle->type == DLM_MLE_MASTER) {
		BUG_ON(!res);
		mle->u.res = res;
	} else if (mle->type == DLM_MLE_BLOCK) {
		BUG_ON(!name);
		memcpy(mle->u.name.name, name, namelen);
		mle->u.name.len = namelen;
	} else /* DLM_MLE_MIGRATION */ {
		BUG_ON(!name);
		memcpy(mle->u.name.name, name, namelen);
		mle->u.name.len = namelen;
	}

	/* copy off the node_map and register hb callbacks on our copy */
	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
	clear_bit(dlm->node_num, mle->vote_map);
	clear_bit(dlm->node_num, mle->node_map);

	/* attach the mle to the domain node up/down events */
	__dlm_mle_attach_hb_events(dlm, mle);
}
/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen)
{
	struct dlm_master_list_entry *tmpmle;

	assert_spin_locked(&dlm->master_lock);

	list_for_each_entry(tmpmle, &dlm->master_list, list) {
		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
			continue;
		dlm_get_mle(tmpmle);
		*mle = tmpmle;
		return 1;
	}
	return 0;
}
void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
	struct dlm_master_list_entry *mle;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
		if (node_up)
			dlm_mle_node_up(dlm, mle, NULL, idx);
		else
			dlm_mle_node_down(dlm, mle, NULL, idx);
	}
}
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (!test_bit(idx, mle->node_map))
		mlog(0, "node %u already removed from nodemap!\n", idx);
	else
		clear_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}

static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (test_bit(idx, mle->node_map))
		mlog(0, "node %u already in node map!\n", idx);
	else
		set_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}
int dlm_init_mle_cache(void)
{
	dlm_mle_cache = kmem_cache_create("o2dlm_mle",
					  sizeof(struct dlm_master_list_entry),
					  0, SLAB_HWCACHE_ALIGN,
					  NULL);
	if (dlm_mle_cache == NULL)
		return -ENOMEM;
	return 0;
}

void dlm_destroy_mle_cache(void)
{
	if (dlm_mle_cache)
		kmem_cache_destroy(dlm_mle_cache);
}
static void dlm_mle_release(struct kref *kref)
{
	struct dlm_master_list_entry *mle;
	struct dlm_ctxt *dlm;

	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
	dlm = mle->dlm;

	if (mle->type != DLM_MLE_MASTER) {
		mlog(0, "calling mle_release for %.*s, type %d\n",
		     mle->u.name.len, mle->u.name.name, mle->type);
	} else {
		mlog(0, "calling mle_release for %.*s, type %d\n",
		     mle->u.res->lockname.len,
		     mle->u.res->lockname.name, mle->type);
	}
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	/* remove from list if not already */
	if (!list_empty(&mle->list))
		list_del_init(&mle->list);

	/* detach the mle from the domain node up/down events */
	__dlm_mle_detach_hb_events(dlm, mle);

	/* NOTE: kfree under spinlock here.
	 * if this is bad, we can move this to a freelist. */
	kmem_cache_free(dlm_mle_cache, mle);
}
/*
 * LOCK RESOURCE FUNCTIONS
 */

int dlm_init_master_caches(void)
{
	dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
					      sizeof(struct dlm_lock_resource),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockres_cache)
		goto bail;

	dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
					       DLM_LOCKID_NAME_MAX, 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockname_cache)
		goto bail;

	return 0;
bail:
	dlm_destroy_master_caches();
	return -ENOMEM;
}

void dlm_destroy_master_caches(void)
{
	if (dlm_lockname_cache)
		kmem_cache_destroy(dlm_lockname_cache);

	if (dlm_lockres_cache)
		kmem_cache_destroy(dlm_lockres_cache);
}
static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  u8 owner)
{
	assert_spin_locked(&res->spinlock);

	mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);

	if (owner == dlm->node_num)
		atomic_inc(&dlm->local_resources);
	else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
		atomic_inc(&dlm->unknown_resources);
	else
		atomic_inc(&dlm->remote_resources);

	res->owner = owner;
}
void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res, u8 owner)
{
	assert_spin_locked(&res->spinlock);

	if (owner == res->owner)
		return;

	if (res->owner == dlm->node_num)
		atomic_dec(&dlm->local_resources);
	else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
		atomic_dec(&dlm->unknown_resources);
	else
		atomic_dec(&dlm->remote_resources);

	dlm_set_lockres_owner(dlm, res, owner);
}
static void dlm_lockres_release(struct kref *kref)
{
	struct dlm_lock_resource *res;

	res = container_of(kref, struct dlm_lock_resource, refs);

	/* This should not happen -- all lockres' have a name
	 * associated with them at init time. */
	BUG_ON(!res->lockname.name);

	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
	     res->lockname.name);

	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
		     res->lockname.len, res->lockname.name);
		dlm_print_one_lock_resource(res);
	}

	if (!hlist_unhashed(&res->hash_node) ||
	    !list_empty(&res->granted) ||
	    !list_empty(&res->converting) ||
	    !list_empty(&res->blocked) ||
	    !list_empty(&res->dirty) ||
	    !list_empty(&res->recovering) ||
	    !list_empty(&res->purge)) {
		mlog(ML_ERROR,
		     "Going to BUG for resource %.*s."
		     " We're on a list! [%c%c%c%c%c%c%c]\n",
		     res->lockname.len, res->lockname.name,
		     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
		     !list_empty(&res->granted) ? 'G' : ' ',
		     !list_empty(&res->converting) ? 'C' : ' ',
		     !list_empty(&res->blocked) ? 'B' : ' ',
		     !list_empty(&res->dirty) ? 'D' : ' ',
		     !list_empty(&res->recovering) ? 'R' : ' ',
		     !list_empty(&res->purge) ? 'P' : ' ');

		dlm_print_one_lock_resource(res);
	}

	/* By the time we're ready to blow this guy away, we shouldn't
	 * be on any lists. */
	BUG_ON(!hlist_unhashed(&res->hash_node));
	BUG_ON(!list_empty(&res->granted));
	BUG_ON(!list_empty(&res->converting));
	BUG_ON(!list_empty(&res->blocked));
	BUG_ON(!list_empty(&res->dirty));
	BUG_ON(!list_empty(&res->recovering));
	BUG_ON(!list_empty(&res->purge));

	kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

	kmem_cache_free(dlm_lockres_cache, res);
}
void dlm_lockres_put(struct dlm_lock_resource *res)
{
	kref_put(&res->refs, dlm_lockres_release);
}
static void dlm_init_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res,
			     const char *name, unsigned int namelen)
{
	char *qname;

	/* If we memset here, we lose our reference to the kmalloc'd
	 * res->lockname.name, so be sure to init every field
	 * correctly! */

	qname = (char *) res->lockname.name;
	memcpy(qname, name, namelen);

	res->lockname.len = namelen;
	res->lockname.hash = dlm_lockid_hash(name, namelen);

	init_waitqueue_head(&res->wq);
	spin_lock_init(&res->spinlock);
	INIT_HLIST_NODE(&res->hash_node);
	INIT_LIST_HEAD(&res->granted);
	INIT_LIST_HEAD(&res->converting);
	INIT_LIST_HEAD(&res->blocked);
	INIT_LIST_HEAD(&res->dirty);
	INIT_LIST_HEAD(&res->recovering);
	INIT_LIST_HEAD(&res->purge);
	INIT_LIST_HEAD(&res->tracking);
	atomic_set(&res->asts_reserved, 0);
	res->migration_pending = 0;
	res->inflight_locks = 0;

	kref_init(&res->refs);

	/* just for consistency */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
	spin_unlock(&res->spinlock);

	res->state = DLM_LOCK_RES_IN_PROGRESS;

	res->last_used = 0;

	list_add_tail(&res->tracking, &dlm->tracking_list);

	memset(res->lvb, 0, DLM_LVB_LEN);
	memset(res->refmap, 0, sizeof(res->refmap));
}
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen)
{
	struct dlm_lock_resource *res = NULL;

	res = (struct dlm_lock_resource *)
		kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
	if (!res)
		goto error;

	res->lockname.name = (char *)
		kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
	if (!res->lockname.name)
		goto error;

	dlm_init_lockres(dlm, res, name, namelen);
	return res;

error:
	if (res && res->lockname.name)
		kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

	if (res)
		kmem_cache_free(dlm_lockres_cache, res);
	return NULL;
}
void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     int new_lockres, const char *file,
				     int line)
{
	if (!new_lockres)
		assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (!test_bit(dlm->node_num, res->refmap)) {
		BUG_ON(res->inflight_locks != 0);
		dlm_lockres_set_refmap_bit(dlm->node_num, res);
	}
	res->inflight_locks++;
	mlog(0, "%s:%.*s: inflight++: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_locks);
}
void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     const char *file, int line)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	BUG_ON(res->inflight_locks == 0);
	res->inflight_locks--;
	mlog(0, "%s:%.*s: inflight--: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_locks);
	if (res->inflight_locks == 0)
		dlm_lockres_clear_refmap_bit(dlm->node_num, res);
	wake_up(&res->wq);
}
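/*
 * Illustrative sketch, not part of the original file: inflight
 * references pin a lockres while messages about it are outstanding.
 * The grab/drop pair must bracket the exchange, each taken under the
 * spinlocks asserted above:
 */
#if 0
static void example_inflight_ref_pairing(struct dlm_ctxt *dlm,
					 struct dlm_lock_resource *res)
{
	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);
	__dlm_lockres_grab_inflight_ref(dlm, res, 0, __FILE__, __LINE__);
	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);

	/* ... send/receive mastery messages about res ... */

	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);
	/* the last drop clears our refmap bit so res can be purged */
	__dlm_lockres_drop_inflight_ref(dlm, res, __FILE__, __LINE__);
	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
}
#endif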
/*
 * lookup a lock resource by name.
 * may already exist in the hashtable.
 * lockid is null terminated
 *
 * if not, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm->master_list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here.  need to wait around for that node
 * to assert_master (or die).
 *
 */
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int namelen,
						 int flags)
{
	struct dlm_lock_resource *tmpres=NULL, *res=NULL;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *alloc_mle = NULL;
	int blocked = 0;
	int ret, nodenum;
	struct dlm_node_iter iter;
	unsigned int hash;
	int tries = 0;
	int bit, wait_on_recovery = 0;
	int drop_inflight_if_nonlocal = 0;

	BUG_ON(!lockid);

	hash = dlm_lockid_hash(lockid, namelen);

	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
	spin_lock(&dlm->spinlock);
	tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
	if (tmpres) {
		int dropping_ref = 0;

		spin_lock(&tmpres->spinlock);
		if (tmpres->owner == dlm->node_num) {
			BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
			dlm_lockres_grab_inflight_ref(dlm, tmpres);
		} else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
			dropping_ref = 1;
		spin_unlock(&tmpres->spinlock);
		spin_unlock(&dlm->spinlock);

		/* wait until done messaging the master, drop our ref to allow
		 * the lockres to be purged, start over. */
		if (dropping_ref) {
			spin_lock(&tmpres->spinlock);
			__dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
			spin_unlock(&tmpres->spinlock);
			dlm_lockres_put(tmpres);
			tmpres = NULL;
			goto lookup;
		}

		mlog(0, "found in hash!\n");
		if (res)
			dlm_lockres_put(res);
		res = tmpres;
		goto leave;
	}

	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(0, "allocating a new resource\n");
		/* nothing found and we need to allocate one. */
		alloc_mle = (struct dlm_master_list_entry *)
			kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
		if (!alloc_mle)
			goto leave;
		res = dlm_new_lockres(dlm, lockid, namelen);
		if (!res)
			goto leave;
		goto lookup;
	}

	mlog(0, "no lockres found, allocated our own: %p\n", res);
	if (flags & LKM_LOCAL) {
		/* caller knows it's safe to assume it's not mastered elsewhere
		 * DONE!  return right away */
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		__dlm_insert_lockres(dlm, res);
		dlm_lockres_grab_inflight_ref(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
		/* lockres still marked IN_PROGRESS */
		goto wake_waiters;
	}

	/* check master list to see if another node has started mastering it */
	spin_lock(&dlm->master_lock);

	/* if we found a block, wait for lock to be mastered by another node */
	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
	if (blocked) {
		int mig;
		if (mle->type == DLM_MLE_MASTER) {
			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
			BUG();
		}
		mig = (mle->type == DLM_MLE_MIGRATION);
		/* if there is a migration in progress, let the migration
		 * finish before continuing.  we can wait for the absence
		 * of the MIGRATION mle: either the migrate finished or
		 * one of the nodes died and the mle was cleaned up.
		 * if there is a BLOCK here, but it already has a master
		 * set, we are too late.  the master does not have a ref
		 * for us in the refmap.  detach the mle and drop it.
		 * either way, go back to the top and start over. */
		if (mig || mle->master != O2NM_MAX_NODES) {
			BUG_ON(mig && mle->master == dlm->node_num);
			/* we arrived too late.  the master does not
			 * have a ref for us. retry. */
			mlog(0, "%s:%.*s: late on %s\n",
			     dlm->name, namelen, lockid,
			     mig ? "MIGRATION" : "BLOCK");
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			/* master is known, detach */
			if (!mig)
				dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
			mle = NULL;
			/* this is lame, but we cant wait on either
			 * the mle or lockres waitqueue here */
			if (mig)
				msleep(100);
			goto lookup;
		}
	} else {
		/* go ahead and try to master lock on this node */
		mle = alloc_mle;
		/* make sure this does not get freed below */
		alloc_mle = NULL;
		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
		set_bit(dlm->node_num, mle->maybe_map);
		list_add(&mle->list, &dlm->master_list);
	}
	/* still holding the dlm spinlock, check the recovery map
	 * to see if there are any nodes that still need to be
	 * considered.  these will not appear in the mle nodemap
	 * but they might own this lockres.  wait on them. */
	bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
	if (bit < O2NM_MAX_NODES) {
		mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
		     "recover before lock mastery can begin\n",
		     dlm->name, namelen, (char *)lockid, bit);
		wait_on_recovery = 1;
	}

	/* at this point there is either a DLM_MLE_BLOCK or a
	 * DLM_MLE_MASTER on the master list, so it's safe to add the
	 * lockres to the hashtable.  anyone who finds the lock will
	 * still have to wait on the IN_PROGRESS. */

	/* finally add the lockres to its hash bucket */
	__dlm_insert_lockres(dlm, res);
	/* since this lockres is new it does not require the spinlock */
	dlm_lockres_grab_inflight_ref_new(dlm, res);

	/* if this node does not become the master make sure to drop
	 * this inflight reference below */
	drop_inflight_if_nonlocal = 1;

	/* get an extra ref on the mle in case this is a BLOCK
	 * if so, the creator of the BLOCK may try to put the last
	 * ref at this time in the assert master handler, so we
	 * need an extra one to keep from a bad ptr deref. */
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

redo_request:
	while (wait_on_recovery) {
		/* any cluster changes that occurred after dropping the
		 * dlm spinlock would be detectable by a change on the mle,
		 * so we only need to clear out the recovery map once. */
		if (dlm_is_recovery_lock(lockid, namelen)) {
			mlog(ML_NOTICE, "%s: recovery map is not empty, but "
			     "must master $RECOVERY lock now\n", dlm->name);
			if (!dlm_pre_master_reco_lockres(dlm, res))
				wait_on_recovery = 0;
			else {
				mlog(0, "%s: waiting 500ms for heartbeat state "
				     "change\n", dlm->name);
				msleep(500);
			}
			continue;
		}

		dlm_kick_recovery_thread(dlm);
		msleep(1000);
		dlm_wait_for_recovery(dlm);

		spin_lock(&dlm->spinlock);
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
			     "recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		} else
			wait_on_recovery = 0;
		spin_unlock(&dlm->spinlock);

		if (wait_on_recovery)
			dlm_wait_for_node_recovery(dlm, bit, 10000);
	}
	/* must wait for lock to be mastered elsewhere */
	if (blocked)
		goto wait;

	ret = -EINVAL;
	dlm_node_iter_init(mle->vote_map, &iter);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = dlm_do_master_request(res, mle, nodenum);
		if (ret < 0)
			mlog_errno(ret);
		if (mle->master != O2NM_MAX_NODES) {
			/* found a master ! */
			if (mle->master <= nodenum)
				break;
			/* if our master request has not reached the master
			 * yet, keep going until it does.  this is how the
			 * master will know that asserts are needed back to
			 * the lower nodes. */
			mlog(0, "%s:%.*s: requests only up to %u but master "
			     "is %u, keep going\n", dlm->name, namelen,
			     lockid, nodenum, mle->master);
		}
	}

wait:
	/* keep going until the response map includes all nodes */
	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
	if (ret < 0) {
		wait_on_recovery = 1;
		mlog(0, "%s:%.*s: node map changed, redo the "
		     "master request now, blocked=%d\n",
		     dlm->name, res->lockname.len,
		     res->lockname.name, blocked);
		if (++tries > 20) {
			mlog(ML_ERROR, "%s:%.*s: spinning on "
			     "dlm_wait_for_lock_mastery, blocked=%d\n",
			     dlm->name, res->lockname.len,
			     res->lockname.name, blocked);
			dlm_print_one_lock_resource(res);
			dlm_print_one_mle(mle);
			tries = 0;
		}
		goto redo_request;
	}
	mlog(0, "lockres mastered by %u\n", res->owner);
	/* make sure we never continue without this */
	BUG_ON(res->owner == O2NM_MAX_NODES);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle(mle);
	/* put the extra ref */
	dlm_put_mle_inuse(mle);

wake_waiters:
	spin_lock(&res->spinlock);
	if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
		dlm_lockres_drop_inflight_ref(dlm, res);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

leave:
	/* need to free the unused mle */
	if (alloc_mle)
		kmem_cache_free(dlm_mle_cache, alloc_mle);

	return res;
}
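/*
 * Illustrative sketch, not part of the original file: a typical caller
 * (e.g. the dlmlock() path) obtains a mastered lockres like this.  The
 * lockid string is hypothetical:
 */
#if 0
static struct dlm_lock_resource *example_lookup(struct dlm_ctxt *dlm)
{
	const char *lockid = "Mexample";	/* hypothetical lock name */
	struct dlm_lock_resource *res;

	/* blocks until a master is known; passing LKM_LOCAL instead of 0
	 * would skip mastery entirely for locks known to be node-local */
	res = dlm_get_lock_resource(dlm, lockid, strlen(lockid), 0);
	if (!res)
		return NULL;	/* allocation failure */

	/* res->owner is now valid; drop with dlm_lockres_put() when done */
	return res;
}
#endif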
#define DLM_MASTERY_TIMEOUT_MS   5000

static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked)
{
	u8 m;
	int ret, bit;
	int map_changed, voting_done;
	int assert, sleep;

recheck:
	ret = 0;
	assert = 0;

	/* check if another node has already become the owner */
	spin_lock(&res->spinlock);
	if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
		     res->lockname.len, res->lockname.name, res->owner);
		spin_unlock(&res->spinlock);
		/* this will cause the master to re-assert across
		 * the whole cluster, freeing up mles */
		if (res->owner != dlm->node_num) {
			ret = dlm_do_master_request(res, mle, res->owner);
			if (ret < 0) {
				/* give recovery a chance to run */
				mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
				msleep(500);
				goto recheck;
			}
		}
		ret = 0;
		goto leave;
	}
	spin_unlock(&res->spinlock);

	spin_lock(&mle->spinlock);
	m = mle->master;
	map_changed = (memcmp(mle->vote_map, mle->node_map,
			      sizeof(mle->vote_map)) != 0);
	voting_done = (memcmp(mle->vote_map, mle->response_map,
			      sizeof(mle->vote_map)) == 0);

	/* restart if we hit any errors */
	if (map_changed) {
		int b;
		mlog(0, "%s: %.*s: node map changed, restarting\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
		b = (mle->type == DLM_MLE_BLOCK);
		if ((*blocked && !b) || (!*blocked && b)) {
			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     *blocked, b);
			*blocked = b;
		}
		spin_unlock(&mle->spinlock);
		if (ret < 0) {
			mlog_errno(ret);
			goto leave;
		}
		mlog(0, "%s:%.*s: restart lock mastery succeeded, "
		     "rechecking now\n", dlm->name, res->lockname.len,
		     res->lockname.name);
		goto recheck;
	} else if (!voting_done) {
		mlog(0, "map not changed and voting not done "
		     "for %s:%.*s\n", dlm->name, res->lockname.len,
		     res->lockname.name);
	}

	if (m != O2NM_MAX_NODES) {
		/* another node has done an assert!
		 * all done! */
		sleep = 0;
	} else {
		sleep = 1;
		/* have all nodes responded? */
		if (voting_done && !*blocked) {
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (dlm->node_num <= bit) {
				/* my node number is lowest.
				 * now tell other nodes that I am
				 * mastering this. */
				mle->master = dlm->node_num;
				/* ref was grabbed in get_lock_resource
				 * will be dropped in dlmlock_master */
				assert = 1;
				sleep = 0;
			}
			/* if voting is done, but we have not received
			 * an assert master yet, we must sleep */
		}
	}

	spin_unlock(&mle->spinlock);

	/* sleep if we haven't finished voting yet */
	if (sleep) {
		unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

		/*
		if (atomic_read(&mle->mle_refs.refcount) < 2)
			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
			atomic_read(&mle->mle_refs.refcount),
			res->lockname.len, res->lockname.name);
		*/
		atomic_set(&mle->woken, 0);
		(void)wait_event_timeout(mle->wq,
					 (atomic_read(&mle->woken) == 1),
					 timeo);
		if (res->owner == O2NM_MAX_NODES) {
			mlog(0, "%s:%.*s: waiting again\n", dlm->name,
			     res->lockname.len, res->lockname.name);
			goto recheck;
		}
		mlog(0, "done waiting, master is %u\n", res->owner);
		ret = 0;
		goto leave;
	}

	ret = 0;   /* done */
	if (assert) {
		m = dlm->node_num;
		mlog(0, "about to master %.*s here, this=%u\n",
		     res->lockname.len, res->lockname.name, m);
		ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
		if (ret) {
			/* This is a failure in the network path,
			 * not in the response to the assert_master
			 * (any nonzero response is a BUG on this node).
			 * Most likely a socket just got disconnected
			 * due to node death. */
			mlog_errno(ret);
		}
		/* no longer need to restart lock mastery.
		 * all living nodes have been contacted. */
	}

	/* set the lockres owner */
	spin_lock(&res->spinlock);
	/* mastery reference obtained either during
	 * assert_master_handler or in get_lock_resource */
	dlm_change_lockres_owner(dlm, res, m);
	spin_unlock(&res->spinlock);

leave:
	return ret;
}
struct dlm_bitmap_diff_iter
{
	int curnode;
	unsigned long *orig_bm;
	unsigned long *cur_bm;
	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
	NODE_DOWN = -1,
	NODE_NO_CHANGE = 0,
	NODE_UP
};

static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
				      unsigned long *orig_bm,
				      unsigned long *cur_bm)
{
	unsigned long p1, p2;
	int i;

	iter->curnode = -1;
	iter->orig_bm = orig_bm;
	iter->cur_bm = cur_bm;

	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
		p1 = *(iter->orig_bm + i);
		p2 = *(iter->cur_bm + i);
		/* XOR: keep only the nodes whose state differs */
		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
	}
}

static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
				     enum dlm_node_state_change *state)
{
	int bit;

	if (iter->curnode >= O2NM_MAX_NODES)
		return -ENOENT;

	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
			    iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}

	/* if it was there in the original then this node died */
	if (test_bit(bit, iter->orig_bm))
		*state = NODE_DOWN;
	else
		*state = NODE_UP;

	iter->curnode = bit;
	return bit;
}
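/*
 * Illustrative sketch, not part of the original file: walking the diff
 * of two node maps.  With orig = {1,2,4} and cur = {2,3,4} the iterator
 * visits node 1 as NODE_DOWN and node 3 as NODE_UP:
 */
#if 0
static void example_diff_iter(unsigned long *orig_bm, unsigned long *cur_bm)
{
	struct dlm_bitmap_diff_iter bdi;
	enum dlm_node_state_change sc;
	int node;

	dlm_bitmap_diff_iter_init(&bdi, orig_bm, cur_bm);
	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	while (node >= 0) {
		if (sc == NODE_UP)
			printk("node %d came up\n", node);
		else	/* NODE_DOWN */
			printk("node %d went down\n", node);
		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	}
}
#endif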
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked)
{
	struct dlm_bitmap_diff_iter bdi;
	enum dlm_node_state_change sc;
	int node;
	int ret = 0;

	mlog(0, "something happened such that the "
	     "master process may need to be restarted!\n");

	assert_spin_locked(&mle->spinlock);

	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	while (node >= 0) {
		if (sc == NODE_UP) {
			/* a node came up.  clear any old vote from
			 * the response map and set it in the vote map
			 * then restart the mastery. */
			mlog(ML_NOTICE, "node %d up while restarting\n", node);

			/* redo the master request, but only for the new node */
			mlog(0, "sending request to new node\n");
			clear_bit(node, mle->response_map);
			set_bit(node, mle->vote_map);
		} else {
			mlog(ML_ERROR, "node down! %d\n", node);
			if (blocked) {
				int lowest = find_next_bit(mle->maybe_map,
							   O2NM_MAX_NODES, 0);

				/* act like it was never there */
				clear_bit(node, mle->maybe_map);

				if (node == lowest) {
					mlog(0, "expected master %u died"
					    " while this node was blocked "
					    "waiting on it!\n", node);
					lowest = find_next_bit(mle->maybe_map,
							       O2NM_MAX_NODES,
							       lowest+1);
					if (lowest < O2NM_MAX_NODES) {
						mlog(0, "%s:%.*s:still "
						     "blocked. waiting on %u "
						     "now\n", dlm->name,
						     res->lockname.len,
						     res->lockname.name,
						     lowest);
					} else {
						/* mle is an MLE_BLOCK, but
						 * there is now nothing left to
						 * block on.  we need to return
						 * all the way back out and try
						 * again with an MLE_MASTER.
						 * dlm_do_local_recovery_cleanup
						 * has already run, so the mle
						 * refcount is ok */
						mlog(0, "%s:%.*s: no "
						     "longer blocking. try to "
						     "master this here\n",
						     dlm->name,
						     res->lockname.len,
						     res->lockname.name);
						mle->type = DLM_MLE_MASTER;
						mle->u.res = res;
					}
				}
			}

			/* now blank out everything, as if we had never
			 * contacted anyone */
			memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
			memset(mle->response_map, 0, sizeof(mle->response_map));
			/* reset the vote_map to the current node_map */
			memcpy(mle->vote_map, mle->node_map,
			       sizeof(mle->node_map));
			/* put myself into the maybe map */
			if (mle->type != DLM_MLE_BLOCK)
				set_bit(dlm->node_num, mle->maybe_map);
		}
		ret = -EAGAIN;
		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	}
	return ret;
}
/*
 * DLM_MASTER_REQUEST_MSG
 *
 * returns: 0 on success,
 *          -errno on a network error
 *
 * on error, the caller should assume the target node is "dead"
 *
 */
static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to)
{
	struct dlm_ctxt *dlm = mle->dlm;
	struct dlm_master_request request;
	int ret, response=0, resend;

	memset(&request, 0, sizeof(request));
	request.node_idx = dlm->node_num;

	BUG_ON(mle->type == DLM_MLE_MIGRATION);

	if (mle->type != DLM_MLE_MASTER) {
		request.namelen = mle->u.name.len;
		memcpy(request.name, mle->u.name.name, request.namelen);
	} else {
		request.namelen = mle->u.res->lockname.len;
		memcpy(request.name, mle->u.res->lockname.name,
		       request.namelen);
	}

again:
	ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
				 sizeof(request), to, &response);
	if (ret < 0)  {
		if (ret == -ESRCH) {
			/* should never happen */
			mlog(ML_ERROR, "TCP stack not ready!\n");
			BUG();
		} else if (ret == -EINVAL) {
			mlog(ML_ERROR, "bad args passed to o2net!\n");
			BUG();
		} else if (ret == -ENOMEM) {
			mlog(ML_ERROR, "out of memory while trying to send "
			     "network message!  retrying\n");
			/* this is totally crude */
			msleep(50);
			goto again;
		} else if (!dlm_is_host_down(ret)) {
			/* not a network error. bad. */
			mlog_errno(ret);
			mlog(ML_ERROR, "unhandled error!");
			BUG();
		}
		/* all other errors should be network errors,
		 * and likely indicate node death */
		mlog(ML_ERROR, "link to %d went down!\n", to);
		goto out;
	}

	ret = 0;
	resend = 0;
	spin_lock(&mle->spinlock);
	switch (response) {
		case DLM_MASTER_RESP_YES:
			set_bit(to, mle->response_map);
			mlog(0, "node %u is the master, response=YES\n", to);
			mlog(0, "%s:%.*s: master node %u now knows I have a "
			     "reference\n", dlm->name, res->lockname.len,
			     res->lockname.name, to);
			mle->master = to;
			break;
		case DLM_MASTER_RESP_NO:
			mlog(0, "node %u not master, response=NO\n", to);
			set_bit(to, mle->response_map);
			break;
		case DLM_MASTER_RESP_MAYBE:
			mlog(0, "node %u not master, response=MAYBE\n", to);
			set_bit(to, mle->response_map);
			set_bit(to, mle->maybe_map);
			break;
		case DLM_MASTER_RESP_ERROR:
			mlog(0, "node %u hit an error, resending\n", to);
			resend = 1;
			response = 0;
			break;
		default:
			mlog(ML_ERROR, "bad response! %u\n", response);
			BUG();
	}
	spin_unlock(&mle->spinlock);
	if (resend) {
		/* this is also totally crude */
		msleep(50);
		goto again;
	}

out:
	return ret;
}
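/*
 * Illustrative sketch, not part of the original file: how the caller in
 * dlm_get_lock_resource() drives dlm_do_master_request() across the
 * cluster.  Node numbers come from the mle's vote map:
 */
#if 0
static void example_request_round(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  struct dlm_master_list_entry *mle)
{
	struct dlm_node_iter iter;
	int nodenum, ret;

	dlm_node_iter_init(mle->vote_map, &iter);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* a failed send means the target is dead; heartbeat
		 * shrinks the node map and mastery restarts */
		ret = dlm_do_master_request(res, mle, nodenum);
		if (ret < 0)
			mlog_errno(ret);
	}
}
#endif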
/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	u8 response = DLM_MASTER_RESP_MAYBE;
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
	char *name;
	unsigned int namelen, hash;
	int found, ret;
	int set_maybe;
	int dispatch_assert = 0;

	if (!dlm_grab(dlm))
		return DLM_MASTER_RESP_NO;

	if (!dlm_domain_fully_joined(dlm)) {
		response = DLM_MASTER_RESP_NO;
		goto send_response;
	}
	name = request->name;
	namelen = request->namelen;
	hash = dlm_lockid_hash(name, namelen);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		response = DLM_IVBUFLEN;
		goto send_response;
	}

way_up_top:
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_unlock(&dlm->spinlock);
		/* take care of the easy cases up front */
		spin_lock(&res->spinlock);
		if (res->state & (DLM_LOCK_RES_RECOVERING|
				  DLM_LOCK_RES_MIGRATING)) {
			spin_unlock(&res->spinlock);
			mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
			     "being recovered/migrated\n");
			response = DLM_MASTER_RESP_ERROR;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		if (res->owner == dlm->node_num) {
			mlog(0, "%s:%.*s: setting bit %u in refmap\n",
			     dlm->name, namelen, name, request->node_idx);
			dlm_lockres_set_refmap_bit(request->node_idx, res);
			spin_unlock(&res->spinlock);
			response = DLM_MASTER_RESP_YES;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);

			/* this node is the owner.
			 * there is some extra work that needs to
			 * happen now.  the requesting node has
			 * caused all nodes up to this one to
			 * create mles.  this node now needs to
			 * go back and clean those up. */
			dispatch_assert = 1;
			goto send_response;
		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
			spin_unlock(&res->spinlock);
			// mlog(0, "node %u is the master\n", res->owner);
			response = DLM_MASTER_RESP_NO;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}
		/* ok, there is no owner.  either this node is
		 * being blocked, or it is actively trying to
		 * master this lock. */
		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
			mlog(ML_ERROR, "lock with no owner should be "
			     "in-progress!\n");
			BUG();
		}

		// mlog(0, "lockres is in progress...\n");
		spin_lock(&dlm->master_lock);
		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
		if (!found) {
			mlog(ML_ERROR, "no mle found for this lock!\n");
			BUG();
		}
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->type == DLM_MLE_BLOCK) {
			// mlog(0, "this node is waiting for "
			// "lockres to be mastered\n");
			response = DLM_MASTER_RESP_NO;
		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "node %u is master, but trying to migrate to "
			     "node %u.\n", tmpmle->master, tmpmle->new_master);
			if (tmpmle->master == dlm->node_num) {
				mlog(ML_ERROR, "no owner on lockres, but this "
				     "node is trying to migrate it to %u?!\n",
				     tmpmle->new_master);
				BUG();
			} else {
				/* the real master can respond on its own */
				response = DLM_MASTER_RESP_NO;
			}
		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			set_maybe = 0;
			if (tmpmle->master == dlm->node_num) {
				response = DLM_MASTER_RESP_YES;
				/* this node will be the owner.
				 * go back and clean the mles on any
				 * other nodes */
				dispatch_assert = 1;
				dlm_lockres_set_refmap_bit(request->node_idx, res);
				mlog(0, "%s:%.*s: setting bit %u in refmap\n",
				     dlm->name, namelen, name,
				     request->node_idx);
			} else
				response = DLM_MASTER_RESP_NO;
		} else {
			// mlog(0, "this node is attempting to "
			// "master lockres\n");
			response = DLM_MASTER_RESP_MAYBE;
		}
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);

		spin_unlock(&dlm->master_lock);
		spin_unlock(&res->spinlock);

		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
		if (mle)
			kmem_cache_free(dlm_mle_cache, mle);
		goto send_response;
	}
	/*
	 * lockres doesn't exist on this node
	 * if there is an MLE_BLOCK, return NO
	 * if there is an MLE_MASTER, return MAYBE
	 * otherwise, add an MLE_BLOCK, return NO
	 */
	spin_lock(&dlm->master_lock);
	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
	if (!found) {
		/* this lockid has never been seen on this node yet */
		// mlog(0, "no mle found\n");
		if (!mle) {
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			mle = (struct dlm_master_list_entry *)
				kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
			if (!mle) {
				response = DLM_MASTER_RESP_ERROR;
				mlog_errno(-ENOMEM);
				goto send_response;
			}
			goto way_up_top;
		}

		// mlog(0, "this is second time thru, already allocated, "
		// "add the block.\n");
		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
		set_bit(request->node_idx, mle->maybe_map);
		list_add(&mle->list, &dlm->master_list);
		response = DLM_MASTER_RESP_NO;
	} else {
		// mlog(0, "mle was found\n");
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->master == dlm->node_num) {
			mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
			BUG();
		}
		if (tmpmle->type == DLM_MLE_BLOCK)
			response = DLM_MASTER_RESP_NO;
		else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "migration mle was found (%u->%u)\n",
			     tmpmle->master, tmpmle->new_master);
			/* real master can respond on its own */
			response = DLM_MASTER_RESP_NO;
		} else
			response = DLM_MASTER_RESP_MAYBE;
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);
	}
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (found) {
		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
	}
send_response:
	/*
	 * __dlm_lookup_lockres() grabbed a reference to this lockres.
	 * The reference is released by dlm_assert_master_worker() under
	 * the call to dlm_dispatch_assert_master().  If
	 * dlm_assert_master_worker() isn't called, we drop it here.
	 */
	if (dispatch_assert) {
		if (response != DLM_MASTER_RESP_YES)
			mlog(ML_ERROR, "invalid response %d\n", response);
		if (!res) {
			mlog(ML_ERROR, "bad lockres while trying to assert!\n");
			BUG();
		}
		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
		     dlm->node_num, res->lockname.len, res->lockname.name);
		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
						 DLM_ASSERT_MASTER_MLE_CLEANUP);
		if (ret < 0) {
			mlog(ML_ERROR, "failed to dispatch assert master work\n");
			response = DLM_MASTER_RESP_ERROR;
			dlm_lockres_put(res);
		}
	} else {
		if (res)
			dlm_lockres_put(res);
	}

	dlm_put(dlm);
	return response;
}
/*
 * DLM_ASSERT_MASTER_MSG
 */


/*
 * NOTE: this can be used for debugging
 * can periodically run all locks owned by this node
 * and re-assert across the cluster...
 */
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags)
{
	struct dlm_assert_master assert;
	int to, tmpret;
	struct dlm_node_iter iter;
	int ret = 0;
	int reassert;
	const char *lockname = res->lockname.name;
	unsigned int namelen = res->lockname.len;

	BUG_ON(namelen > O2NM_MAX_NAME_LEN);

	spin_lock(&res->spinlock);
	res->state |= DLM_LOCK_RES_SETREF_INPROG;
	spin_unlock(&res->spinlock);

again:
	reassert = 0;

	/* note that if this nodemap is empty, it returns 0 */
	dlm_node_iter_init(nodemap, &iter);
	while ((to = dlm_node_iter_next(&iter)) >= 0) {
		int r = 0;
		struct dlm_master_list_entry *mle = NULL;

		mlog(0, "sending assert master to %d (%.*s)\n", to,
		     namelen, lockname);
		memset(&assert, 0, sizeof(assert));
		assert.node_idx = dlm->node_num;
		assert.namelen = namelen;
		memcpy(assert.name, lockname, namelen);
		assert.flags = cpu_to_be32(flags);

		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
					    &assert, sizeof(assert), to, &r);
		if (tmpret < 0) {
			mlog(0, "assert_master returned %d!\n", tmpret);
			if (!dlm_is_host_down(tmpret)) {
				mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
				BUG();
			}
			/* a node died.  finish out the rest of the nodes. */
			mlog(0, "link to %d went down!\n", to);
			/* any nonzero status return will do */
			ret = tmpret;
			r = 0;
		} else if (r < 0) {
			/* ok, something horribly messed.  kill thyself. */
			mlog(ML_ERROR,"during assert master of %.*s to %u, "
			     "got %d.\n", namelen, lockname, to, r);
			spin_lock(&dlm->spinlock);
			spin_lock(&dlm->master_lock);
			if (dlm_find_mle(dlm, &mle, (char *)lockname,
					 namelen)) {
				dlm_print_one_mle(mle);
				__dlm_put_mle(mle);
			}
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);
			BUG();
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT &&
		    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
			mlog(ML_ERROR, "%.*s: very strange, "
			     "master MLE but no lockres on %u\n",
			     namelen, lockname, to);
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT) {
			mlog(0, "%.*s: node %u create mles on other "
			     "nodes and requests a re-assert\n",
			     namelen, lockname, to);
			reassert = 1;
		}
		if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
			mlog(0, "%.*s: node %u has a reference to this "
			     "lockres, set the bit in the refmap\n",
			     namelen, lockname, to);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(to, res);
			spin_unlock(&res->spinlock);
		}
	}

	if (reassert)
		goto again;

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	return ret;
}
/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	char *name;
	unsigned int namelen, hash;
	u32 flags;
	int master_request = 0, have_lockres_ref = 0;
	int ret = 0;

	if (!dlm_grab(dlm))
		return 0;

	name = assert->name;
	namelen = assert->namelen;
	hash = dlm_lockid_hash(name, namelen);
	flags = be32_to_cpu(assert->flags);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length!");
		goto done;
	}

	spin_lock(&dlm->spinlock);
	if (flags)
		mlog(0, "assert_master with flags: %u\n", flags);

	/* find the MLE */
	spin_lock(&dlm->master_lock);
	if (!dlm_find_mle(dlm, &mle, name, namelen)) {
		/* not an error, could be master just re-asserting */
		mlog(0, "just got an assert_master from %u, but no "
		     "MLE for it! (%.*s)\n", assert->node_idx,
		     namelen, name);
	} else {
		int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES) {
			/* not necessarily an error, though less likely.
			 * could be master just re-asserting. */
			mlog(0, "no bits set in the maybe_map, but %u "
			     "is asserting! (%.*s)\n", assert->node_idx,
			     namelen, name);
		} else if (bit != assert->node_idx) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "master %u was found, %u should "
				     "back off\n", assert->node_idx, bit);
			} else {
				/* with the fix for bug 569, a higher node
				 * number winning the mastery will respond
				 * YES to mastery requests, but this node
				 * had no way of knowing.  let it pass. */
				mlog(0, "%u is the lowest node, "
				     "%u is asserting. (%.*s)  %u must "
				     "have begun after %u won.\n", bit,
				     assert->node_idx, namelen, name, bit,
				     assert->node_idx);
			}
		}
		if (mle->type == DLM_MLE_MIGRATION) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "%s:%.*s: got cleanup assert"
				     " from %u for migration\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
			} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
				mlog(0, "%s:%.*s: got unrelated assert"
				     " from %u for migration, ignoring\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
				__dlm_put_mle(mle);
				spin_unlock(&dlm->master_lock);
				spin_unlock(&dlm->spinlock);
				goto done;
			}
		}
	}
	spin_unlock(&dlm->master_lock);
	/* ok everything checks out with the MLE
	 * now check to see if there is a lockres */
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING)  {
			mlog(ML_ERROR, "%u asserting but %.*s is "
			     "RECOVERING!\n", assert->node_idx, namelen, name);
			goto kill;
		}
		if (!mle) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
			    res->owner != assert->node_idx) {
				mlog(ML_ERROR, "assert_master from "
				     "%u, but current owner is "
				     "%u! (%.*s)\n",
				     assert->node_idx, res->owner,
				     namelen, name);
				goto kill;
			}
		} else if (mle->type != DLM_MLE_MIGRATION) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
				/* owner is just re-asserting */
				if (res->owner == assert->node_idx) {
					mlog(0, "owner %u re-asserting on "
					     "lock %.*s\n", assert->node_idx,
					     namelen, name);
					goto ok;
				}
				mlog(ML_ERROR, "got assert_master from "
				     "node %u, but %u is the owner! "
				     "(%.*s)\n", assert->node_idx,
				     res->owner, namelen, name);
				goto kill;
			}
			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
				mlog(ML_ERROR, "got assert from %u, but lock "
				     "with no owner should be "
				     "in-progress! (%.*s)\n",
				     assert->node_idx,
				     namelen, name);
				goto kill;
			}
		} else /* mle->type == DLM_MLE_MIGRATION */ {
			/* should only be getting an assert from new master */
			if (assert->node_idx != mle->new_master) {
				mlog(ML_ERROR, "got assert from %u, but "
				     "new master is %u, and old master "
				     "was %u (%.*s)\n",
				     assert->node_idx, mle->new_master,
				     mle->master, namelen, name);
				goto kill;
			}
		}
ok:
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);
	// mlog(0, "woo!  got an assert_master from node %u!\n",
	// 	     assert->node_idx);
	if (mle) {
		int extra_ref = 0;
		int nn = -1;
		int rr, err = 0;

		spin_lock(&mle->spinlock);
		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
			extra_ref = 1;
		else {
			/* MASTER mle: if any bits set in the response map
			 * then the calling node needs to re-assert to clear
			 * up nodes that this node contacted */
			while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
						    nn+1)) < O2NM_MAX_NODES) {
				if (nn != dlm->node_num && nn != assert->node_idx)
					master_request = 1;
			}
		}
		mle->master = assert->node_idx;
		atomic_set(&mle->woken, 1);
		wake_up(&mle->wq);
		spin_unlock(&mle->spinlock);

		if (res) {
			int wake = 0;
			spin_lock(&res->spinlock);
			if (mle->type == DLM_MLE_MIGRATION) {
				mlog(0, "finishing off migration of lockres %.*s, "
				     "from %u to %u\n",
				     res->lockname.len, res->lockname.name,
				     dlm->node_num, mle->new_master);
				res->state &= ~DLM_LOCK_RES_MIGRATING;
				wake = 1;
				dlm_change_lockres_owner(dlm, res, mle->new_master);
				BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
			} else
				dlm_change_lockres_owner(dlm, res, mle->master);
			spin_unlock(&res->spinlock);
			have_lockres_ref = 1;
			if (wake)
				wake_up(&res->wq);
		}
		/* master is known, detach if not already detached.
		 * ensures that only one assert_master call will happen
		 * on this mle. */
		spin_lock(&dlm->spinlock);
		spin_lock(&dlm->master_lock);

		rr = atomic_read(&mle->mle_refs.refcount);
		if (mle->inuse > 0) {
			if (extra_ref && rr < 3)
				err = 1;
			else if (!extra_ref && rr < 2)
				err = 1;
		} else {
			if (extra_ref && rr < 2)
				err = 1;
			else if (!extra_ref && rr < 1)
				err = 1;
		}
		if (err) {
			mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
			     "that will mess up this node, refs=%d, extra=%d, "
			     "inuse=%d\n", dlm->name, namelen, name,
			     assert->node_idx, rr, extra_ref, mle->inuse);
			dlm_print_one_mle(mle);
		}
		list_del_init(&mle->list);
		__dlm_mle_detach_hb_events(dlm, mle);
		__dlm_put_mle(mle);
		/* the assert master message now balances the extra
		 * ref given by the master / migration request message.
		 * if this is the last put, it will be removed
		 * from the list. */
		__dlm_put_mle(mle);
		spin_unlock(&dlm->master_lock);
		spin_unlock(&dlm->spinlock);
	} else if (res) {
		if (res->owner != assert->node_idx) {
			mlog(0, "assert_master from %u, but current "
			     "owner is %u (%.*s), no mle\n", assert->node_idx,
			     res->owner, namelen, name);
		}
	}

done:
	ret = 0;
	if (res) {
		spin_lock(&res->spinlock);
		res->state |= DLM_LOCK_RES_SETREF_INPROG;
		spin_unlock(&res->spinlock);
		*ret_data = (void *)res;
	}
	if (master_request) {
		mlog(0, "need to tell master to reassert\n");
		/* positive. negative would shoot down the node. */
		ret |= DLM_ASSERT_RESPONSE_REASSERT;
		if (!have_lockres_ref) {
			mlog(ML_ERROR, "strange, got assert from %u, MASTER "
			     "mle present here for %s:%.*s, but no lockres!\n",
			     assert->node_idx, dlm->name, namelen, name);
		}
	}
	if (have_lockres_ref) {
		/* let the master know we have a reference to the lockres */
		ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
		mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
		     dlm->name, namelen, name, assert->node_idx);
	}
	dlm_put(dlm);
	return ret;

kill:
	/* kill the caller! */
	mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
	     "and killing the other node now!  This node is OK and can continue.\n");
	__dlm_print_one_lock_resource(res);
	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
	*ret_data = (void *)res;
	dlm_put(dlm);
	return -EINVAL;
}
void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
{
	struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;

	if (ret_data) {
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
		dlm_lockres_put(res);
	}
	return;
}
int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher, u8 request_from, u32 flags)
{
	struct dlm_work_item *item;
	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item)
		return -ENOMEM;

	/* queue up work for dlm_assert_master_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
	item->u.am.lockres = res; /* already have a ref */
	/* can optionally ignore node numbers higher than this node */
	item->u.am.ignore_higher = ignore_higher;
	item->u.am.request_from = request_from;
	item->u.am.flags = flags;

	if (ignore_higher)
		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
		     res->lockname.name);

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
	return 0;
}
static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm = data;
	int ret = 0;
	struct dlm_lock_resource *res;
	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int ignore_higher;
	int bit;
	u8 request_from;
	u32 flags;

	dlm = item->dlm;
	res = item->u.am.lockres;
	ignore_higher = item->u.am.ignore_higher;
	request_from = item->u.am.request_from;
	flags = item->u.am.flags;

	spin_lock(&dlm->spinlock);
	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
	spin_unlock(&dlm->spinlock);

	clear_bit(dlm->node_num, nodemap);
	if (ignore_higher) {
		/* if is this just to clear up mles for nodes below
		 * this node, do not send the message to the original
		 * caller or any node number higher than this */
		clear_bit(request_from, nodemap);
		bit = dlm->node_num;
		while (1) {
			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
					    bit+1);
			if (bit >= O2NM_MAX_NODES)
				break;
			clear_bit(bit, nodemap);
		}
	}

	/*
	 * If we're migrating this lock to someone else, we are no
	 * longer allowed to assert our own mastery.  OTOH, we need to
	 * prevent migration from starting while we're still asserting
	 * our dominance.  The reserved ast delays migration.
	 */
	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		mlog(0, "Someone asked us to assert mastery, but we're "
		     "in the middle of migration.  Skipping assert, "
		     "the new master will handle that.\n");
		spin_unlock(&res->spinlock);
		goto put;
	} else
		__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	mlog(0, "worker about to master %.*s here, this=%u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num);
	ret = dlm_do_assert_master(dlm, res, nodemap, flags);
	if (ret < 0) {
		/* no need to restart, we are done */
		if (!dlm_is_host_down(ret))
			mlog_errno(ret);
	}

	/* Ok, we've asserted ourselves.  Let's let migration start. */
	dlm_lockres_release_ast(dlm, res);

put:
	dlm_lockres_put(res);

	mlog(0, "finished with dlm_assert_master_worker\n");
}
/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
 * We cannot wait for node recovery to complete to begin mastering this
 * lockres because this lockres is used to kick off recovery! ;-)
 * So, do a pre-check on all living nodes to see if any of those nodes
 * think that $RECOVERY is currently mastered by a dead node.  If so,
 * we wait a short time to allow that node to get notified by its own
 * heartbeat stack, then check again.  All $RECOVERY lock resources
 * mastered by dead nodes are purged when the heartbeat callback is
 * fired, so we can know for sure that it is safe to continue once
 * the node returns a live node or no node.  */
2226 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2227 struct dlm_lock_resource *res)
2229 struct dlm_node_iter iter;
2232 u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2234 spin_lock(&dlm->spinlock);
2235 dlm_node_iter_init(dlm->domain_map, &iter);
2236 spin_unlock(&dlm->spinlock);
2238 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2239 /* do not send to self */
2240 if (nodenum == dlm->node_num)
2242 ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2245 if (!dlm_is_host_down(ret))
2247 /* host is down, so answer for that node would be
2248 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
2252 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2253 /* check to see if this master is in the recovery map */
2254 spin_lock(&dlm->spinlock);
2255 if (test_bit(master, dlm->recovery_map)) {
2256 mlog(ML_NOTICE, "%s: node %u has not seen "
2257 "node %u go down yet, and thinks the "
2258 "dead node is mastering the recovery "
2259 "lock. must wait.\n", dlm->name,
2263 spin_unlock(&dlm->spinlock);
2264 mlog(0, "%s: reco lock master is %u\n", dlm->name,

/*
 * DLM_DEREF_LOCKRES_MSG
 */

int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	struct dlm_deref_lockres deref;
	int ret = 0, r;
	const char *lockname;
	unsigned int namelen;

	lockname = res->lockname.name;
	namelen = res->lockname.len;
	BUG_ON(namelen > O2NM_MAX_NAME_LEN);

	mlog(0, "%s:%.*s: sending deref to %d\n",
	     dlm->name, namelen, lockname, res->owner);
	memset(&deref, 0, sizeof(deref));
	deref.node_idx = dlm->node_num;
	deref.namelen = namelen;
	memcpy(deref.name, lockname, namelen);

	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
				 &deref, sizeof(deref), res->owner, &r);
	if (ret < 0)
		mlog_errno(ret);
	else if (r < 0) {
		/* BAD.  other node says I did not have a ref. */
		mlog(ML_ERROR, "while dropping ref on %s:%.*s "
		     "(master=%u) got %d.\n", dlm->name, namelen,
		     lockname, res->owner, r);
		dlm_print_one_lock_resource(res);
		BUG();
	}
	return ret;
}
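
/* Sketch, defined out: the o2net calling convention used throughout
 * this file.  The return value of o2net_send_message() reports
 * transport-level failure, while the final argument receives the
 * remote handler's status; both must be checked, as
 * dlm_drop_lockres_ref does above. */
#if 0
	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
				 &deref, sizeof(deref), res->owner, &r);
	if (ret < 0)
		mlog_errno(ret);	/* message never arrived */
	else if (r < 0)
		mlog_errno(r);		/* remote node rejected the deref */
#endif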

int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	char *name;
	unsigned int namelen;
	int ret = -EINVAL;
	u8 node;
	unsigned int hash;
	struct dlm_work_item *item;
	int cleared = 0;
	int dispatch = 0;

	if (!dlm_grab(dlm))
		return 0;

	name = deref->name;
	namelen = deref->namelen;
	node = deref->node_idx;

	if (namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length!\n");
		goto done;
	}
	if (deref->node_idx >= O2NM_MAX_NODES) {
		mlog(ML_ERROR, "Invalid node number: %u\n", node);
		goto done;
	}

	hash = dlm_lockid_hash(name, namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
		     dlm->name, namelen, name);
		goto done;
	}
	spin_unlock(&dlm->spinlock);

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_SETREF_INPROG)
		dispatch = 1;
	else {
		BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
		if (test_bit(node, res->refmap)) {
			dlm_lockres_clear_refmap_bit(node, res);
			cleared = 1;
		}
	}
	spin_unlock(&res->spinlock);

	if (!dispatch) {
		if (cleared)
			dlm_lockres_calc_usage(dlm, res);
		else {
			mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
			     "but it is already dropped!\n", dlm->name,
			     res->lockname.len, res->lockname.name, node);
			dlm_print_one_lock_resource(res);
		}
		ret = 0;
		goto done;
	}

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto done;
	}

	dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
	item->u.dl.deref_res = res;
	item->u.dl.deref_node = node;

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
	return 0;

done:
	if (res)
		dlm_lockres_put(res);
	dlm_put(dlm);

	return ret;
}

static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_lock_resource *res;
	u8 node;
	u8 cleared = 0;

	dlm = item->dlm;
	res = item->u.dl.deref_res;
	node = item->u.dl.deref_node;

	spin_lock(&res->spinlock);
	BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
	if (test_bit(node, res->refmap)) {
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		dlm_lockres_clear_refmap_bit(node, res);
		cleared = 1;
	}
	spin_unlock(&res->spinlock);

	if (cleared) {
		mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
		     dlm->name, res->lockname.len, res->lockname.name, node);
		dlm_lockres_calc_usage(dlm, res);
	} else {
		mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
		     "but it is already dropped!\n", dlm->name,
		     res->lockname.len, res->lockname.name, node);
		dlm_print_one_lock_resource(res);
	}

	dlm_lockres_put(res);
}
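
/* Sketch, defined out, of why the deref is sometimes deferred: if
 * DLM_LOCK_RES_SETREF_INPROG is set, clearing the refmap bit must
 * wait, and only the worker runs in a context where sleeping is
 * safe, so the handler queues dlm_deref_lockres_worker instead of
 * blocking in message context.  Conceptually (simplified; the real
 * wait in __dlm_wait_on_lockres_flags sleeps on a waitqueue rather
 * than polling): */
#if 0
	spin_lock(&res->spinlock);
	while (res->state & DLM_LOCK_RES_SETREF_INPROG) {
		spin_unlock(&res->spinlock);
		msleep(1);
		spin_lock(&res->spinlock);
	}
	dlm_lockres_clear_refmap_bit(node, res);
	spin_unlock(&res->spinlock);
#endif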

/* Checks whether the lockres can be migrated. Returns 0 if yes, < 0
 * if not. If 0, numlocks is set to the number of locks in the lockres.
 */
static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      int *numlocks)
{
	int ret;
	int i;
	int count = 0;
	struct list_head *queue;
	struct dlm_lock *lock;

	assert_spin_locked(&res->spinlock);

	ret = -EINVAL;
	if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "cannot migrate lockres with unknown owner!\n");
		goto leave;
	}

	if (res->owner != dlm->node_num) {
		mlog(0, "cannot migrate lockres this node doesn't own!\n");
		goto leave;
	}

	ret = 0;
	queue = &res->granted;
	for (i = 0; i < 3; i++) {
		list_for_each_entry(lock, queue, list) {
			++count;
			if (lock->ml.node == dlm->node_num) {
				mlog(0, "found a lock owned by this node still "
				     "on the %s queue! will not migrate this "
				     "lockres\n", (i == 0 ? "granted" :
						   (i == 1 ? "converting" :
						    "blocked")));
				ret = -ENOTEMPTY;
				goto leave;
			}
		}
		queue++;
	}

	*numlocks = count;
	mlog(0, "migrateable lockres having %d locks\n", *numlocks);

leave:
	return ret;
}

/*
 * DLM_MIGRATE_LOCKRES
 */

static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       u8 target)
{
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *oldmle = NULL;
	struct dlm_migratable_lockres *mres = NULL;
	int ret = 0;
	const char *name;
	unsigned int namelen;
	int mle_added = 0;
	int numlocks;
	int wake = 0;

	if (!dlm_grab(dlm))
		return -EINVAL;

	name = res->lockname.name;
	namelen = res->lockname.len;

	mlog(0, "migrating %.*s to %u\n", namelen, name, target);

	/*
	 * ensure this lockres is a proper candidate for migration
	 */
	spin_lock(&res->spinlock);
	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
	if (ret < 0) {
		spin_unlock(&res->spinlock);
		goto leave;
	}
	spin_unlock(&res->spinlock);

	/* no work to do */
	if (numlocks == 0) {
		mlog(0, "no locks were found on this lockres! done!\n");
		goto leave;
	}

	/*
	 * preallocate up front
	 * if this fails, abort
	 */

	ret = -ENOMEM;
	mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
	if (!mres) {
		mlog_errno(ret);
		goto leave;
	}

	mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
								GFP_NOFS);
	if (!mle) {
		mlog_errno(ret);
		goto leave;
	}
	ret = 0;

	/*
	 * find a node to migrate the lockres to
	 */

	mlog(0, "picking a migration node\n");
	spin_lock(&dlm->spinlock);
	/* pick a new node; check the range before testing the bit */
	if (target >= O2NM_MAX_NODES ||
	    !test_bit(target, dlm->domain_map)) {
		target = dlm_pick_migration_target(dlm, res);
	}
	mlog(0, "node %u chosen for migration\n", target);

	if (target >= O2NM_MAX_NODES ||
	    !test_bit(target, dlm->domain_map)) {
		/* target chosen is not alive */
		ret = -EINVAL;
	}

	if (ret) {
		spin_unlock(&dlm->spinlock);
		goto fail;
	}

	mlog(0, "continuing with target = %u\n", target);

	/*
	 * clear any existing master requests and
	 * add the migration mle to the list
	 */
	spin_lock(&dlm->master_lock);
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
				    namelen, target, dlm->node_num);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (ret == -EEXIST) {
		mlog(0, "another process is already migrating it\n");
		goto fail;
	}
	mle_added = 1;

	/*
	 * set the MIGRATING flag and flush asts
	 * if we fail after this we need to re-dirty the lockres
	 */
	if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
		mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
		     "the target went down.\n", res->lockname.len,
		     res->lockname.name, target);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		wake = 1;
		spin_unlock(&res->spinlock);
		ret = -EINVAL;
	}

fail:
	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (ret < 0) {
		if (mle_added) {
			dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
		} else if (mle) {
			kmem_cache_free(dlm_mle_cache, mle);
		}
		goto leave;
	}

	/*
	 * at this point, we have a migration target, an mle
	 * in the master list, and the MIGRATING flag set on
	 * the lockres
	 */

	/* now that remote nodes are spinning on the MIGRATING flag,
	 * ensure that all assert_master work is flushed. */
	flush_workqueue(dlm->dlm_worker);

	/* get an extra reference on the mle.
	 * otherwise the assert_master from the new
	 * master will destroy this.
	 * also, make sure that all callers of dlm_get_mle
	 * take both dlm->spinlock and dlm->master_lock */
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	/* notify new node and send all lock state */
	/* call send_one_lockres with migration flag.
	 * this serves as notice to the target node that a
	 * migration is starting. */
	ret = dlm_send_one_lockres(dlm, res, mres, target,
				   DLM_MRES_MIGRATION);

	if (ret < 0) {
		mlog(0, "migration to node %u failed with %d\n",
		     target, ret);
		/* migration failed, detach and clean up mle */
		dlm_mle_detach_hb_events(dlm, mle);
		dlm_put_mle(mle);
		dlm_put_mle_inuse(mle);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		wake = 1;
		spin_unlock(&res->spinlock);
		goto leave;
	}

	/* at this point, the target sends a message to all nodes,
	 * (using dlm_do_migrate_request).  this node is skipped since
	 * we had to put an mle in the list to begin the process.  this
	 * node now waits for target to do an assert master.  this node
	 * will be the last one notified, ensuring that the migration
	 * is complete everywhere.  if the target dies while this is
	 * going on, some nodes could potentially see the target as the
	 * master, so it is important that my recovery finds the migration
	 * mle and sets the master to UNKNOWN. */

	/* wait for new node to assert master */
	while (1) {
		ret = wait_event_interruptible_timeout(mle->wq,
					(atomic_read(&mle->woken) == 1),
					msecs_to_jiffies(5000));

		if (ret >= 0) {
			if (atomic_read(&mle->woken) == 1 ||
			    res->owner == target)
				break;

			mlog(0, "%s:%.*s: timed out during migration\n",
			     dlm->name, res->lockname.len, res->lockname.name);
			/* avoid hang during shutdown when migrating lockres
			 * to a node which also goes down */
			if (dlm_is_node_dead(dlm, target)) {
				mlog(0, "%s:%.*s: expected migration "
				     "target %u is no longer up, restarting\n",
				     dlm->name, res->lockname.len,
				     res->lockname.name, target);
				ret = -EINVAL;
				/* migration failed, detach and clean up mle */
				dlm_mle_detach_hb_events(dlm, mle);
				dlm_put_mle(mle);
				dlm_put_mle_inuse(mle);
				spin_lock(&res->spinlock);
				res->state &= ~DLM_LOCK_RES_MIGRATING;
				wake = 1;
				spin_unlock(&res->spinlock);
				goto leave;
			}
		} else
			mlog(0, "%s:%.*s: caught signal during migration\n",
			     dlm->name, res->lockname.len, res->lockname.name);
	}

	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, target);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	dlm_remove_nonlocal_locks(dlm, res);
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle_inuse(mle);
	ret = 0;

	dlm_lockres_calc_usage(dlm, res);

leave:
	/* re-dirty the lockres if we failed */
	if (ret < 0)
		dlm_kick_thread(dlm, res);

	/* wake up waiters if the MIGRATING flag got set
	 * but migration failed */
	if (wake)
		wake_up(&res->wq);

	/* TODO: cleanup */
	if (mres)
		free_page((unsigned long)mres);

	dlm_put(dlm);

	mlog(0, "returning %d\n", ret);
	return ret;
}

#define DLM_MIGRATION_RETRY_MS	100

/* Should be called only after beginning the domain leave process.
 * There should not be any remaining locks on nonlocal lock resources,
 * and there should be no local locks left on locally mastered resources.
 *
 * Called with the dlm spinlock held, may drop it to do migration, but
 * will re-acquire before exit.
 *
 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */
int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	int ret;
	int lock_dropped = 0;
	int numlocks;

	spin_lock(&res->spinlock);
	if (res->owner != dlm->node_num) {
		if (!__dlm_lockres_unused(res)) {
			mlog(ML_ERROR, "%s:%.*s: this node is not master, "
			     "trying to free this but locks remain\n",
			     dlm->name, res->lockname.len, res->lockname.name);
		}
		spin_unlock(&res->spinlock);
		goto leave;
	}

	/* No need to migrate a lockres having no locks */
	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
	if (ret >= 0 && numlocks == 0) {
		spin_unlock(&res->spinlock);
		goto leave;
	}
	spin_unlock(&res->spinlock);

	/* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
	spin_unlock(&dlm->spinlock);
	lock_dropped = 1;
	while (1) {
		ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
		if (ret >= 0)
			break;
		if (ret == -ENOTEMPTY) {
			mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
			     res->lockname.len, res->lockname.name);
			BUG();
		}

		mlog(0, "lockres %.*s: migrate failed, "
		     "retrying\n", res->lockname.len,
		     res->lockname.name);
		msleep(DLM_MIGRATION_RETRY_MS);
	}
	spin_lock(&dlm->spinlock);
leave:
	return lock_dropped;
}
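
/* Caller-contract sketch, defined out (the real walker lives in the
 * domain-leave code): a nonzero return from dlm_empty_lockres means
 * dlm->spinlock was dropped and retaken, so any hash-bucket walk in
 * progress is stale and must restart from the top. */
#if 0
	assert_spin_locked(&dlm->spinlock);
	if (dlm_empty_lockres(dlm, res)) {
		/* the spinlock was dropped during migration;
		 * restart iteration of this hash bucket */
	}
#endif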

int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	int ret;
	spin_lock(&dlm->ast_lock);
	spin_lock(&lock->spinlock);
	ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
	spin_unlock(&lock->spinlock);
	spin_unlock(&dlm->ast_lock);
	return ret;
}
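
/* Sketch, defined out: dlm_lock_basts_flushed takes and drops both
 * locks itself precisely so that it can sit in a wait_event()
 * condition.  A caller waiting for a lock's pending basts to drain
 * might do (hypothetical call site): */
#if 0
	wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock));
#endif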

static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     u8 mig_target)
{
	int can_proceed;
	spin_lock(&res->spinlock);
	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
	spin_unlock(&res->spinlock);

	/* target has died, so make the caller break out of the
	 * wait_event, but caller must recheck the domain_map */
	spin_lock(&dlm->spinlock);
	if (!test_bit(mig_target, dlm->domain_map))
		can_proceed = 1;
	spin_unlock(&dlm->spinlock);
	return can_proceed;
}

static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res)
{
	int ret;
	spin_lock(&res->spinlock);
	ret = !!(res->state & DLM_LOCK_RES_DIRTY);
	spin_unlock(&res->spinlock);
	return ret;
}

static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target)
{
	int ret = 0;

	mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num,
	     target);
	/* need to set MIGRATING flag on lockres.  this is done by
	 * ensuring that all asts have been flushed for this lockres. */
	spin_lock(&res->spinlock);
	BUG_ON(res->migration_pending);
	res->migration_pending = 1;
	/* strategy is to reserve an extra ast then release
	 * it below, letting the release do all of the work */
	__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* now flush all the pending asts */
	dlm_kick_thread(dlm, res);
	/* before waiting on DIRTY, block processes which may
	 * try to dirty the lockres before MIGRATING is set */
	spin_lock(&res->spinlock);
	BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
	res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
	spin_unlock(&res->spinlock);
	/* now wait on any pending asts and the DIRTY state */
	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
	dlm_lockres_release_ast(dlm, res);

	mlog(0, "about to wait on migration_wq, dirty=%s\n",
	     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
	/* if the extra ref we just put was the final one, this
	 * will pass thru immediately.  otherwise, we need to wait
	 * for the last ast to finish. */
again:
	ret = wait_event_interruptible_timeout(dlm->migration_wq,
		   dlm_migration_can_proceed(dlm, res, target),
		   msecs_to_jiffies(1000));
	if (ret < 0) {
		mlog(0, "woken again: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	} else {
		mlog(0, "all is well: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	}
	if (!dlm_migration_can_proceed(dlm, res, target)) {
		mlog(0, "trying again...\n");
		goto again;
	}
	/* now that we are sure the MIGRATING state is there, drop
	 * the unneeded state which blocked threads trying to DIRTY */
	spin_lock(&res->spinlock);
	BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
	BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
	res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
	spin_unlock(&res->spinlock);

	/* did the target go down or die? */
	spin_lock(&dlm->spinlock);
	if (!test_bit(target, dlm->domain_map)) {
		mlog(ML_ERROR, "aha. migration target %u just went down\n",
		     target);
		ret = -EHOSTDOWN;
	}
	spin_unlock(&dlm->spinlock);

	/*
	 * at this point:
	 *
	 *   o the DLM_LOCK_RES_MIGRATING flag is set
	 *   o there are no pending asts on this lockres
	 *   o all processes trying to reserve an ast on this
	 *     lockres must wait for the MIGRATING flag to clear
	 */
	return ret;
}

/* last step in the migration process.
 * original master calls this to free all of the dlm_lock
 * structures that used to be for other nodes. */
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res)
{
	struct list_head *queue = &res->granted;
	int i, bit;
	struct dlm_lock *lock, *next;

	assert_spin_locked(&res->spinlock);

	BUG_ON(res->owner == dlm->node_num);

	for (i=0; i<3; i++) {
		list_for_each_entry_safe(lock, next, queue, list) {
			if (lock->ml.node != dlm->node_num) {
				mlog(0, "putting lock for node %u\n",
				     lock->ml.node);
				/* be extra careful */
				BUG_ON(!list_empty(&lock->ast_list));
				BUG_ON(!list_empty(&lock->bast_list));
				BUG_ON(lock->ast_pending);
				BUG_ON(lock->bast_pending);
				dlm_lockres_clear_refmap_bit(lock->ml.node,
							     res);
				list_del_init(&lock->list);
				dlm_lock_put(lock);
				/* In a normal unlock, we would have added a
				 * DLM_UNLOCK_FREE_LOCK action. Force it. */
				dlm_lock_put(lock);
			}
		}
		queue++;
	}
	bit = 0;
	while (1) {
		bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
		if (bit >= O2NM_MAX_NODES)
			break;
		/* do not clear the local node reference, if there is a
		 * process holding this, let it drop the ref itself */
		if (bit != dlm->node_num) {
			mlog(0, "%s:%.*s: node %u had a ref to this "
			     "migrating lockres, clearing\n", dlm->name,
			     res->lockname.len, res->lockname.name, bit);
			dlm_lockres_clear_refmap_bit(bit, res);
		}
		bit++;
	}
}

/* for now this is not too intelligent.  we will
 * need stats to make this do the right thing.
 * this just finds the first lock on one of the
 * queues and uses that node as the target. */
static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue = &res->granted;
	struct dlm_lock *lock;
	int nodenum;

	assert_spin_locked(&dlm->spinlock);

	spin_lock(&res->spinlock);
	for (i=0; i<3; i++) {
		list_for_each_entry(lock, queue, list) {
			/* up to the caller to make sure this node
			 * is alive */
			if (lock->ml.node != dlm->node_num) {
				spin_unlock(&res->spinlock);
				return lock->ml.node;
			}
		}
		queue++;
	}
	spin_unlock(&res->spinlock);
	mlog(0, "have not found a suitable target yet! checking domain map\n");

	/* ok now we're getting desperate.  pick anyone alive. */
	nodenum = -1;
	while (1) {
		nodenum = find_next_bit(dlm->domain_map,
					O2NM_MAX_NODES, nodenum+1);
		mlog(0, "found %d in domain map\n", nodenum);
		if (nodenum >= O2NM_MAX_NODES)
			break;
		if (nodenum != dlm->node_num) {
			mlog(0, "picking %d\n", nodenum);
			return nodenum;
		}
	}

	mlog(0, "giving up.  no master to migrate to\n");
	return DLM_LOCK_RES_OWNER_UNKNOWN;
}

/* this is called by the new master once all lockres
 * data has been received */
static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  u8 master, u8 new_master,
				  struct dlm_node_iter *iter)
{
	struct dlm_migrate_request migrate;
	int ret, status = 0;
	int nodenum;

	memset(&migrate, 0, sizeof(migrate));
	migrate.namelen = res->lockname.len;
	memcpy(migrate.name, res->lockname.name, migrate.namelen);
	migrate.new_master = new_master;
	migrate.master = master;

	ret = 0;

	/* send message to all nodes, except the master and myself */
	while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
		if (nodenum == master ||
		    nodenum == new_master)
			continue;

		ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
					 &migrate, sizeof(migrate), nodenum,
					 &status);
		if (ret < 0)
			mlog_errno(ret);
		else if (status < 0) {
			mlog(0, "migrate request (node %u) returned %d!\n",
			     nodenum, status);
			ret = status;
		} else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
			/* during the migration request we short-circuited
			 * the mastery of the lockres.  make sure we have
			 * a mastery ref for nodenum */
			mlog(0, "%s:%.*s: need ref for node %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     nodenum);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(nodenum, res);
			spin_unlock(&res->spinlock);
		}
	}

	if (ret < 0)
		mlog_errno(ret);

	mlog(0, "returning ret=%d\n", ret);
	return ret;
}

/* if there is an existing mle for this lockres, we now know who the master is.
 * (the one who sent us *this* message) we can clear it up right away.
 * since the process that put the mle on the list still has a reference to it,
 * we can unhash it now, set the master and wake the process.  as a result,
 * we will have no mle in the list to start with.  now we can add an mle for
 * the migration and this should be the only one found for those scanning the
 * master list. */
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
				void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
	const char *name;
	unsigned int namelen, hash;
	int ret = 0;

	if (!dlm_grab(dlm))
		return -EINVAL;

	name = migrate->name;
	namelen = migrate->namelen;
	hash = dlm_lockid_hash(name, namelen);

	/* preallocate.. if this fails, abort */
	mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
								GFP_NOFS);
	if (!mle) {
		ret = -ENOMEM;
		goto leave;
	}

	/* check for pre-existing lock */
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	spin_lock(&dlm->master_lock);

	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING) {
			/* if all is working ok, this can only mean that we got
			 * a migrate request from a node that we now see as
			 * dead.  what can we do here?  drop it to the floor? */
			spin_unlock(&res->spinlock);
			mlog(ML_ERROR, "Got a migrate request, but the "
			     "lockres is marked as recovering!\n");
			kmem_cache_free(dlm_mle_cache, mle);
			ret = -EINVAL; /* need a better solution */
			goto unlock;
		}
		res->state |= DLM_LOCK_RES_MIGRATING;
		spin_unlock(&res->spinlock);
	}

	/* ignore status.  only nonzero status would BUG. */
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
				    name, namelen,
				    migrate->new_master,
				    migrate->master);

unlock:
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (res)
		dlm_lockres_put(res);
leave:
	dlm_put(dlm);
	return ret;
}

/* must be holding dlm->spinlock and dlm->master_lock
 * when adding a migration mle, we can clear any other mles
 * in the master list because we know with certainty that
 * the master is "master".  so we remove any old mle from
 * the list after setting its master field, and then add
 * the new migration mle.  this way we can hold with the rule
 * of having only one mle for a given lock name at all times. */
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master)
{
	int found;
	int ret = 0;

	*oldmle = NULL;

	mlog_entry_void();

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	/* caller is responsible for any ref taken here on oldmle */
	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
	if (found) {
		struct dlm_master_list_entry *tmp = *oldmle;
		spin_lock(&tmp->spinlock);
		if (tmp->type == DLM_MLE_MIGRATION) {
			if (master == dlm->node_num) {
				/* ah another process raced me to it */
				mlog(0, "tried to migrate %.*s, but some "
				     "process beat me to it\n",
				     namelen, name);
				ret = -EEXIST;
			} else {
				/* bad.  2 NODES are trying to migrate! */
				mlog(ML_ERROR, "migration error  mle: "
				     "master=%u new_master=%u // request: "
				     "master=%u new_master=%u // "
				     "lockres=%.*s\n",
				     tmp->master, tmp->new_master,
				     master, new_master,
				     namelen, name);
				BUG();
			}
		} else {
			/* this is essentially what assert_master does */
			tmp->master = master;
			atomic_set(&tmp->woken, 1);
			wake_up(&tmp->wq);
			/* remove it from the list so that only one
			 * mle will be found */
			list_del_init(&tmp->list);
			/* detach the old mle (tmp) here; the new mle
			 * has not been initialized yet at this point */
			__dlm_mle_detach_hb_events(dlm, tmp);
			ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
			mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
			     "telling master to get ref for cleared out mle "
			     "during migration\n", dlm->name, namelen, name,
			     master, new_master);
		}
		spin_unlock(&tmp->spinlock);
	}

	/* now add a migration mle to the tail of the list */
	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
	mle->new_master = new_master;
	/* the new master will be sending an assert master for this.
	 * at that point we will get the refmap reference */
	mle->master = master;
	/* do this for consistency with other mle types */
	set_bit(new_master, mle->maybe_map);
	list_add(&mle->list, &dlm->master_list);

	return ret;
}
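
/* Sketch, defined out: the locking contract dlm_add_migration_mle
 * asserts.  Every caller must hold dlm->spinlock, then
 * dlm->master_lock, in that order for the whole call -- see
 * dlm_migrate_lockres and dlm_migrate_request_handler above. */
#if 0
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
				    namelen, target, dlm->node_num);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
#endif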

void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_master_list_entry *mle, *next;
	struct dlm_lock_resource *res;
	unsigned int hash;

	mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
top:
	assert_spin_locked(&dlm->spinlock);

	/* clean the master list */
	spin_lock(&dlm->master_lock);
	list_for_each_entry_safe(mle, next, &dlm->master_list, list) {
		BUG_ON(mle->type != DLM_MLE_BLOCK &&
		       mle->type != DLM_MLE_MASTER &&
		       mle->type != DLM_MLE_MIGRATION);

		/* MASTER mles are initiated locally.  the waiting
		 * process will notice the node map change
		 * shortly.  let that happen as normal. */
		if (mle->type == DLM_MLE_MASTER)
			continue;

		/* BLOCK mles are initiated by other nodes.
		 * need to clean up if the dead node would have
		 * been the master. */
		if (mle->type == DLM_MLE_BLOCK) {
			int bit;

			spin_lock(&mle->spinlock);
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (bit != dead_node) {
				mlog(0, "mle found, but dead node %u would "
				     "not have been master\n", dead_node);
				spin_unlock(&mle->spinlock);
			} else {
				/* must drop the refcount by one since the
				 * assert_master will never arrive.  this
				 * may result in the mle being unlinked and
				 * freed, but there may still be a process
				 * waiting in the dlmlock path which is fine. */
				mlog(0, "node %u was expected master\n",
				     dead_node);
				atomic_set(&mle->woken, 1);
				spin_unlock(&mle->spinlock);
				wake_up(&mle->wq);
				/* do not need events any longer, so detach
				 * from heartbeat */
				__dlm_mle_detach_hb_events(dlm, mle);
				__dlm_put_mle(mle);
			}
			continue;
		}

		/* everything else is a MIGRATION mle */

		/* the rule for MIGRATION mles is that the master
		 * becomes UNKNOWN if *either* the original or
		 * the new master dies.  all UNKNOWN lockreses
		 * are sent to whichever node becomes the recovery
		 * master.  the new master is responsible for
		 * determining if there is still a master for
		 * this lockres, or if he needs to take over
		 * mastery.  either way, this node should expect
		 * another message to resolve this. */
		if (mle->master != dead_node &&
		    mle->new_master != dead_node)
			continue;

		/* if we have reached this point, this mle needs to
		 * be removed from the list and freed. */

		/* remove from the list early.  NOTE: unlinking
		 * list_head while in list_for_each_safe */
		__dlm_mle_detach_hb_events(dlm, mle);
		spin_lock(&mle->spinlock);
		list_del_init(&mle->list);
		atomic_set(&mle->woken, 1);
		spin_unlock(&mle->spinlock);
		wake_up(&mle->wq);

		mlog(0, "%s: node %u died during migration from "
		     "%u to %u!\n", dlm->name, dead_node,
		     mle->master, mle->new_master);
		/* if there is a lockres associated with this
		 * mle, find it and set its owner to UNKNOWN */
		hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len);
		res = __dlm_lookup_lockres(dlm, mle->u.name.name,
					   mle->u.name.len, hash);
		if (res) {
			/* unfortunately if we hit this rare case, our
			 * lock ordering is messed.  we need to drop
			 * the master lock so that we can take the
			 * lockres lock, meaning that we will have to
			 * restart from the head of list. */
			spin_unlock(&dlm->master_lock);

			/* move lockres onto recovery list */
			spin_lock(&res->spinlock);
			dlm_set_lockres_owner(dlm, res,
					      DLM_LOCK_RES_OWNER_UNKNOWN);
			dlm_move_lockres_to_recovery_list(dlm, res);
			spin_unlock(&res->spinlock);
			dlm_lockres_put(res);

			/* about to get rid of mle, detach from heartbeat */
			__dlm_mle_detach_hb_events(dlm, mle);

			/* dump the mle */
			spin_lock(&dlm->master_lock);
			__dlm_put_mle(mle);
			spin_unlock(&dlm->master_lock);

			/* restart */
			goto top;
		}

		/* this may be the last reference */
		__dlm_put_mle(mle);
	}
	spin_unlock(&dlm->master_lock);
}

int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 u8 old_master)
{
	struct dlm_node_iter iter;
	int ret = 0;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	clear_bit(old_master, iter.node_map);
	clear_bit(dlm->node_num, iter.node_map);
	spin_unlock(&dlm->spinlock);

	/* ownership of the lockres is changing.  account for the
	 * mastery reference here since old_master will briefly have
	 * a reference after the migration completes */
	spin_lock(&res->spinlock);
	dlm_lockres_set_refmap_bit(old_master, res);
	spin_unlock(&res->spinlock);

	mlog(0, "now time to do a migrate request to other nodes\n");
	ret = dlm_do_migrate_request(dlm, res, old_master,
				     dlm->node_num, &iter);
	if (ret < 0) {
		mlog_errno(ret);
		goto leave;
	}

	mlog(0, "doing assert master of %.*s to all except the original node\n",
	     res->lockname.len, res->lockname.name);
	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	ret = dlm_do_assert_master(dlm, res, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		/* no longer need to retry.  all living nodes contacted. */
		mlog_errno(ret);
		ret = 0;
	}

	memset(iter.node_map, 0, sizeof(iter.node_map));
	set_bit(old_master, iter.node_map);
	mlog(0, "doing assert master of %.*s back to %u\n",
	     res->lockname.len, res->lockname.name, old_master);
	ret = dlm_do_assert_master(dlm, res, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		mlog(0, "assert master to original master failed "
		     "with %d.\n", ret);
		/* the only nonzero status here would be because of
		 * a dead original node.  we're done. */
		ret = 0;
	}

	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, dlm->node_num);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	/* re-dirty it on the new master */
	dlm_kick_thread(dlm, res);
	wake_up(&res->wq);
leave:
	return ret;
}

/*
 * LOCKRES AST REFCOUNT
 * this is integral to migration
 */

/* for future intent to call an ast, reserve one ahead of time.
 * this should be called only after waiting on the lockres
 * with dlm_wait_on_lockres, and while still holding the
 * spinlock after the call. */
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		__dlm_print_one_lock_resource(res);
	}
	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);

	atomic_inc(&res->asts_reserved);
}

/*
 * used to drop the reserved ast, either because it went unused,
 * or because the ast/bast was actually called.
 *
 * also, if there is a pending migration on this lockres,
 * and this was the last pending ast on the lockres,
 * atomically set the MIGRATING flag before we drop the lock.
 * this is how we ensure that migration can proceed with no
 * asts in progress.  note that it is ok if the state of the
 * queues is such that a lock should be granted in the future
 * or that a bast should be fired, because the new master will
 * shuffle the lists on this lockres as soon as it is migrated.
 */
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
		return;

	if (!res->migration_pending) {
		spin_unlock(&res->spinlock);
		return;
	}

	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
	res->migration_pending = 0;
	res->state |= DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);
	wake_up(&dlm->migration_wq);
}
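
/* Sketch, defined out: the intended reserve/release pairing.  Any
 * path that may later deliver an ast reserves one while holding the
 * lockres spinlock; the matching release is what allows a pending
 * migration to set DLM_LOCK_RES_MIGRATING once the reserved count
 * drains to zero (see dlm_mark_lockres_migrating above). */
#if 0
	spin_lock(&res->spinlock);
	__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* ... deliver or queue the ast/bast ... */

	dlm_lockres_release_ast(dlm, res);
#endif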