1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * dlmrecovery.c
5  *
6  * recovery stuff
7  *
8  * Copyright (C) 2004 Oracle.  All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public
12  * License as published by the Free Software Foundation; either
13  * version 2 of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public
21  * License along with this program; if not, write to the
22  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23  * Boston, MA 021110-1307, USA.
24  *
25  */
26
27
28 #include <linux/module.h>
29 #include <linux/fs.h>
30 #include <linux/types.h>
31 #include <linux/slab.h>
32 #include <linux/highmem.h>
33 #include <linux/utsname.h>
34 #include <linux/init.h>
35 #include <linux/sysctl.h>
36 #include <linux/random.h>
37 #include <linux/blkdev.h>
38 #include <linux/socket.h>
39 #include <linux/inet.h>
40 #include <linux/timer.h>
41 #include <linux/kthread.h>
42 #include <linux/delay.h>
43
44
45 #include "cluster/heartbeat.h"
46 #include "cluster/nodemanager.h"
47 #include "cluster/tcp.h"
48
49 #include "dlmapi.h"
50 #include "dlmcommon.h"
51 #include "dlmdomain.h"
52
53 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
54 #include "cluster/masklog.h"
55
56 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
57
58 static int dlm_recovery_thread(void *data);
59 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
60 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
61 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
62 static int dlm_do_recovery(struct dlm_ctxt *dlm);
63
64 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
65 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
66 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
67 static int dlm_request_all_locks(struct dlm_ctxt *dlm,
68                                  u8 request_from, u8 dead_node);
69 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
70
71 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
72 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
73                                         const char *lockname, int namelen,
74                                         int total_locks, u64 cookie,
75                                         u8 flags, u8 master);
76 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
77                                     struct dlm_migratable_lockres *mres,
78                                     u8 send_to,
79                                     struct dlm_lock_resource *res,
80                                     int total_locks);
81 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
82                                      struct dlm_lock_resource *res,
83                                      struct dlm_migratable_lockres *mres);
84 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
85 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
86                                  u8 dead_node, u8 send_to);
87 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
88 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
89                                         struct list_head *list, u8 dead_node);
90 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
91                                               u8 dead_node, u8 new_master);
92 static void dlm_reco_ast(void *astdata);
93 static void dlm_reco_bast(void *astdata, int blocked_type);
94 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
95 static void dlm_request_all_locks_worker(struct dlm_work_item *item,
96                                          void *data);
97 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
98 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
99                                       struct dlm_lock_resource *res,
100                                       u8 *real_master);
101
102 static u64 dlm_get_next_mig_cookie(void);
103
104 static DEFINE_SPINLOCK(dlm_reco_state_lock);
105 static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
106 static u64 dlm_mig_cookie = 1;
107
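/* Hand out a cluster-unique migration cookie.  The counter wraps back to
 * 1 instead of 0 so that a zero cookie stays reserved for the dummy lock
 * used by dlm_add_dummy_lock()/dlm_is_dummy_lock() further down. */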
108 static u64 dlm_get_next_mig_cookie(void)
109 {
110         u64 c;
111         spin_lock(&dlm_mig_cookie_lock);
112         c = dlm_mig_cookie;
113         if (dlm_mig_cookie == (~0ULL))
114                 dlm_mig_cookie = 1;
115         else
116                 dlm_mig_cookie++;
117         spin_unlock(&dlm_mig_cookie_lock);
118         return c;
119 }
120
121 static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
122                                           u8 dead_node)
123 {
124         assert_spin_locked(&dlm->spinlock);
125         if (dlm->reco.dead_node != dead_node)
126                 mlog(0, "%s: changing dead_node from %u to %u\n",
127                      dlm->name, dlm->reco.dead_node, dead_node);
128         dlm->reco.dead_node = dead_node;
129 }
130
131 static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
132                                        u8 master)
133 {
134         assert_spin_locked(&dlm->spinlock);
135         mlog(0, "%s: changing new_master from %u to %u\n",
136              dlm->name, dlm->reco.new_master, master);
137         dlm->reco.new_master = master;
138 }
139
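/* Clear the dead node's bit from the recovery map and mark both the
 * recovery target and the recovery master as invalid.  Callers of the
 * __ variant must already hold dlm->spinlock. */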
140 static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
141 {
142         assert_spin_locked(&dlm->spinlock);
143         clear_bit(dlm->reco.dead_node, dlm->recovery_map);
144         dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
145         dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
146 }
147
148 static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
149 {
150         spin_lock(&dlm->spinlock);
151         __dlm_reset_recovery(dlm);
152         spin_unlock(&dlm->spinlock);
153 }
154
155 /* Worker function used during recovery. */
156 void dlm_dispatch_work(struct work_struct *work)
157 {
158         struct dlm_ctxt *dlm =
159                 container_of(work, struct dlm_ctxt, dispatched_work);
160         LIST_HEAD(tmp_list);
161         struct list_head *iter, *iter2;
162         struct dlm_work_item *item;
163         dlm_workfunc_t *workfunc;
164         int tot=0;
165
166         spin_lock(&dlm->work_lock);
167         list_splice_init(&dlm->work_list, &tmp_list);
168         spin_unlock(&dlm->work_lock);
169
170         list_for_each_safe(iter, iter2, &tmp_list) {
171                 tot++;
172         }
173         mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);
174
175         list_for_each_safe(iter, iter2, &tmp_list) {
176                 item = list_entry(iter, struct dlm_work_item, list);
177                 workfunc = item->func;
178                 list_del_init(&item->list);
179
180                 /* already have ref on dlm to avoid having
181                  * it disappear.  just double-check. */
182                 BUG_ON(item->dlm != dlm);
183
184                 /* this is allowed to sleep and
185                  * call network stuff */
186                 workfunc(item, item->data);
187
188                 dlm_put(dlm);
189                 kfree(item);
190         }
191 }
192
193 /*
194  * RECOVERY THREAD
195  */
196
197 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
198 {
199         /* wake the recovery thread
200          * this will wake the reco thread in one of three places
201          * 1) sleeping with no recovery happening
202          * 2) sleeping with recovery mastered elsewhere
203          * 3) recovery mastered here, waiting on reco data */
204
205         wake_up(&dlm->dlm_reco_thread_wq);
206 }
207
208 /* Launch the recovery thread */
209 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
210 {
211         mlog(0, "starting dlm recovery thread...\n");
212
213         dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
214                                                 "dlm_reco_thread");
215         if (IS_ERR(dlm->dlm_reco_thread_task)) {
216                 mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
217                 dlm->dlm_reco_thread_task = NULL;
218                 return -EINVAL;
219         }
220
221         return 0;
222 }
223
224 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
225 {
226         if (dlm->dlm_reco_thread_task) {
227                 mlog(0, "waiting for dlm recovery thread to exit\n");
228                 kthread_stop(dlm->dlm_reco_thread_task);
229                 dlm->dlm_reco_thread_task = NULL;
230         }
231 }
232
233
234
235 /*
236  * this is lame, but here's how recovery works...
237  * 1) all recovery threads cluster wide will work on recovering
238  *    ONE node at a time
239  * 2) negotiate who will take over all the locks for the dead node.
240  *    that's right... ALL the locks.
241  * 3) once a new master is chosen, everyone scans all locks
242  *    and moves aside those mastered by the dead guy
243  * 4) each of these locks should be locked until recovery is done
244  * 5) the new master collects all of the secondary lock queue info
245  *    one lock at a time, forcing each node to communicate back
246  *    before continuing
247  * 6) each secondary lock queue responds with the full known lock info
248  * 7) once the new master has run all its locks, it sends an ALLDONE!
249  *    message to everyone
250  * 8) upon receiving this message, the secondary queue node unlocks
251  *    and responds to the ALLDONE
252  * 9) once the new master gets responses from everyone, he unlocks
253  *    everything and recovery for this dead node is done
254  *10) go back to 2) while there are still dead nodes
255  *
256  */
257
258 static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
259 {
260         struct dlm_reco_node_data *ndata;
261         struct dlm_lock_resource *res;
262
263         mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
264              dlm->name, dlm->dlm_reco_thread_task->pid,
265              dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
266              dlm->reco.dead_node, dlm->reco.new_master);
267
268         list_for_each_entry(ndata, &dlm->reco.node_data, list) {
269                 char *st = "unknown";
270                 switch (ndata->state) {
271                         case DLM_RECO_NODE_DATA_INIT:
272                                 st = "init";
273                                 break;
274                         case DLM_RECO_NODE_DATA_REQUESTING:
275                                 st = "requesting";
276                                 break;
277                         case DLM_RECO_NODE_DATA_DEAD:
278                                 st = "dead";
279                                 break;
280                         case DLM_RECO_NODE_DATA_RECEIVING:
281                                 st = "receiving";
282                                 break;
283                         case DLM_RECO_NODE_DATA_REQUESTED:
284                                 st = "requested";
285                                 break;
286                         case DLM_RECO_NODE_DATA_DONE:
287                                 st = "done";
288                                 break;
289                         case DLM_RECO_NODE_DATA_FINALIZE_SENT:
290                                 st = "finalize-sent";
291                                 break;
292                         default:
293                                 st = "bad";
294                                 break;
295                 }
296                 mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
297                      dlm->name, ndata->node_num, st);
298         }
299         list_for_each_entry(res, &dlm->reco.resources, recovering) {
300                 mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
301                      dlm->name, res->lockname.len, res->lockname.name);
302         }
303 }
304
305 #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
306
307 static int dlm_recovery_thread(void *data)
308 {
309         int status;
310         struct dlm_ctxt *dlm = data;
311         unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);
312
313         mlog(0, "dlm thread running for %s...\n", dlm->name);
314
315         while (!kthread_should_stop()) {
316                 if (dlm_joined(dlm)) {
317                         status = dlm_do_recovery(dlm);
318                         if (status == -EAGAIN) {
319                                 /* do not sleep, recheck immediately. */
320                                 continue;
321                         }
322                         if (status < 0)
323                                 mlog_errno(status);
324                 }
325
326                 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
327                                                  kthread_should_stop(),
328                                                  timeout);
329         }
330
331         mlog(0, "quitting DLM recovery thread\n");
332         return 0;
333 }
334
335 /* returns true when the recovery master has contacted us */
336 static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
337 {
338         int ready;
339         spin_lock(&dlm->spinlock);
340         ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
341         spin_unlock(&dlm->spinlock);
342         return ready;
343 }
344
345 /* returns true if node is no longer in the domain
346  * could be dead or just not joined */
347 int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
348 {
349         int dead;
350         spin_lock(&dlm->spinlock);
351         dead = !test_bit(node, dlm->domain_map);
352         spin_unlock(&dlm->spinlock);
353         return dead;
354 }
355
356 /* returns true if the node has already been recovered, that is, its
357  * bit has been cleared from the recovery map */
358 static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
359 {
360         int recovered;
361         spin_lock(&dlm->spinlock);
362         recovered = !test_bit(node, dlm->recovery_map);
363         spin_unlock(&dlm->spinlock);
364         return recovered;
365 }
366
367
368 int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
369 {
370         if (timeout) {
371                 mlog(ML_NOTICE, "%s: waiting %dms for notification of "
372                      "death of node %u\n", dlm->name, timeout, node);
373                 wait_event_timeout(dlm->dlm_reco_thread_wq,
374                            dlm_is_node_dead(dlm, node),
375                            msecs_to_jiffies(timeout));
376         } else {
377                 mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
378                      "of death of node %u\n", dlm->name, node);
379                 wait_event(dlm->dlm_reco_thread_wq,
380                            dlm_is_node_dead(dlm, node));
381         }
382         /* for now, return 0 */
383         return 0;
384 }
385
386 int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
387 {
388         if (timeout) {
389                 mlog(0, "%s: waiting %dms for notification of "
390                      "recovery of node %u\n", dlm->name, timeout, node);
391                 wait_event_timeout(dlm->dlm_reco_thread_wq,
392                            dlm_is_node_recovered(dlm, node),
393                            msecs_to_jiffies(timeout));
394         } else {
395                 mlog(0, "%s: waiting indefinitely for notification "
396                      "of recovery of node %u\n", dlm->name, node);
397                 wait_event(dlm->dlm_reco_thread_wq,
398                            dlm_is_node_recovered(dlm, node));
399         }
400         /* for now, return 0 */
401         return 0;
402 }
403
404 /* callers of the top-level api calls (dlmlock/dlmunlock) should
405  * block on the dlm->reco.event when recovery is in progress.
406  * the dlm recovery thread will set this state when it begins
407  * recovering a dead node (as the new master or not) and clear
408  * the state and wake as soon as all affected lock resources have
409  * been marked with the RECOVERY flag */
410 static int dlm_in_recovery(struct dlm_ctxt *dlm)
411 {
412         int in_recovery;
413         spin_lock(&dlm->spinlock);
414         in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
415         spin_unlock(&dlm->spinlock);
416         return in_recovery;
417 }
418
419
420 void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
421 {
422         if (dlm_in_recovery(dlm)) {
423                 mlog(0, "%s: reco thread %d in recovery: "
424                      "state=%d, master=%u, dead=%u\n",
425                      dlm->name, dlm->dlm_reco_thread_task->pid,
426                      dlm->reco.state, dlm->reco.new_master,
427                      dlm->reco.dead_node);
428         }
429         wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
430 }
431
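/* Flag the domain as actively recovering.  While DLM_RECO_STATE_ACTIVE is
 * set, top-level dlmlock/dlmunlock callers block in dlm_wait_for_recovery()
 * until dlm_end_recovery() clears the flag and wakes dlm->reco.event. */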
432 static void dlm_begin_recovery(struct dlm_ctxt *dlm)
433 {
434         spin_lock(&dlm->spinlock);
435         BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
436         dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
437         spin_unlock(&dlm->spinlock);
438 }
439
440 static void dlm_end_recovery(struct dlm_ctxt *dlm)
441 {
442         spin_lock(&dlm->spinlock);
443         BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
444         dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
445         spin_unlock(&dlm->spinlock);
446         wake_up(&dlm->reco.event);
447 }
448
449 static int dlm_do_recovery(struct dlm_ctxt *dlm)
450 {
451         int status = 0;
452         int ret;
453
454         spin_lock(&dlm->spinlock);
455
456         /* check to see if the new master has died */
457         if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
458             test_bit(dlm->reco.new_master, dlm->recovery_map)) {
459                 mlog(0, "new master %u died while recovering %u!\n",
460                      dlm->reco.new_master, dlm->reco.dead_node);
461                 /* unset the new_master, leave dead_node */
462                 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
463         }
464
465         /* select a target to recover */
466         if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
467                 int bit;
468
469                 bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
470                 if (bit >= O2NM_MAX_NODES || bit < 0)
471                         dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
472                 else
473                         dlm_set_reco_dead_node(dlm, bit);
474         } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
475                 /* BUG? */
476                 mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
477                      dlm->reco.dead_node);
478                 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
479         }
480
481         if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
482                 // mlog(0, "nothing to recover!  sleeping now!\n");
483                 spin_unlock(&dlm->spinlock);
484                 /* return to main thread loop and sleep. */
485                 return 0;
486         }
487         mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
488              dlm->name, dlm->dlm_reco_thread_task->pid,
489              dlm->reco.dead_node);
490         spin_unlock(&dlm->spinlock);
491
492         /* take write barrier */
493         /* (stops the list reshuffling thread, proxy ast handling) */
494         dlm_begin_recovery(dlm);
495
496         if (dlm->reco.new_master == dlm->node_num)
497                 goto master_here;
498
499         if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
500                 /* choose a new master, returns 0 if this node
501                  * is the master, -EEXIST if it's another node.
502                  * this does not return until a new master is chosen
503                  * or recovery completes entirely. */
504                 ret = dlm_pick_recovery_master(dlm);
505                 if (!ret) {
506                         /* already notified everyone.  go. */
507                         goto master_here;
508                 }
509                 mlog(0, "another node will master this recovery session.\n");
510         }
511         mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
512              dlm->name, dlm->dlm_reco_thread_task->pid, dlm->reco.new_master,
513              dlm->node_num, dlm->reco.dead_node);
514
515         /* it is safe to start everything back up here
516          * because all of the dead node's lock resources
517          * have been marked as in-recovery */
518         dlm_end_recovery(dlm);
519
520         /* sleep out in main dlm_recovery_thread loop. */
521         return 0;
522
523 master_here:
524         mlog(0, "(%d) mastering recovery of %s:%u here(this=%u)!\n",
525              dlm->dlm_reco_thread_task->pid,
526              dlm->name, dlm->reco.dead_node, dlm->node_num);
527
528         status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
529         if (status < 0) {
530                 /* we should never hit this anymore */
531                 mlog(ML_ERROR, "error %d remastering locks for node %u, "
532                      "retrying.\n", status, dlm->reco.dead_node);
533                 /* yield a bit to allow any final network messages
534                  * to get handled on remaining nodes */
535                 msleep(100);
536         } else {
537                 /* success!  see if any other nodes need recovery */
538                 mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
539                      dlm->name, dlm->reco.dead_node, dlm->node_num);
540                 dlm_reset_recovery(dlm);
541         }
542         dlm_end_recovery(dlm);
543
544         /* continue and look for another dead node */
545         return -EAGAIN;
546 }
547
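/* Run only on the node that won the recovery master election: ask every
 * other live node for the dead node's lock state, wait until each of them
 * reports DLM_RECO_NODE_DATA_DONE (or dies), then send the finalize
 * message and claim the recovered lock resources locally via
 * dlm_finish_local_lockres_recovery(). */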
548 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
549 {
550         int status = 0;
551         struct dlm_reco_node_data *ndata;
552         struct list_head *iter;
553         int all_nodes_done;
554         int destroy = 0;
555         int pass = 0;
556
557         do {
558                 /* we have become recovery master.  there is no escaping
559                  * this, so just keep trying until we get it. */
560                 status = dlm_init_recovery_area(dlm, dead_node);
561                 if (status < 0) {
562                         mlog(ML_ERROR, "%s: failed to alloc recovery area, "
563                              "retrying\n", dlm->name);
564                         msleep(1000);
565                 }
566         } while (status != 0);
567
568         /* safe to access the node data list without a lock, since this
569          * process is the only one to change the list */
570         list_for_each(iter, &dlm->reco.node_data) {
571                 ndata = list_entry (iter, struct dlm_reco_node_data, list);
572                 BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
573                 ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
574
575                 mlog(0, "requesting lock info from node %u\n",
576                      ndata->node_num);
577
578                 if (ndata->node_num == dlm->node_num) {
579                         ndata->state = DLM_RECO_NODE_DATA_DONE;
580                         continue;
581                 }
582
583                 do {
584                         status = dlm_request_all_locks(dlm, ndata->node_num,
585                                                        dead_node);
586                         if (status < 0) {
587                                 mlog_errno(status);
588                                 if (dlm_is_host_down(status)) {
589                                         /* node died, ignore it for recovery */
590                                         status = 0;
591                                         ndata->state = DLM_RECO_NODE_DATA_DEAD;
592                                         /* wait for the domain map to catch up
593                                          * with the network state. */
594                                         wait_event_timeout(dlm->dlm_reco_thread_wq,
595                                                            dlm_is_node_dead(dlm,
596                                                                 ndata->node_num),
597                                                            msecs_to_jiffies(1000));
598                                         mlog(0, "waited 1 sec for %u, "
599                                              "dead? %s\n", ndata->node_num,
600                                              dlm_is_node_dead(dlm, ndata->node_num) ?
601                                              "yes" : "no");
602                                 } else {
603                                         /* -ENOMEM on the other node */
604                                         mlog(0, "%s: node %u returned "
605                                              "%d during recovery, retrying "
606                                              "after a short wait\n",
607                                              dlm->name, ndata->node_num,
608                                              status);
609                                         msleep(100);
610                                 }
611                         }
612                 } while (status != 0);
613
614                 switch (ndata->state) {
615                         case DLM_RECO_NODE_DATA_INIT:
616                         case DLM_RECO_NODE_DATA_FINALIZE_SENT:
617                         case DLM_RECO_NODE_DATA_REQUESTED:
618                                 BUG();
619                                 break;
620                         case DLM_RECO_NODE_DATA_DEAD:
621                                 mlog(0, "node %u died after requesting "
622                                      "recovery info for node %u\n",
623                                      ndata->node_num, dead_node);
624                                 /* fine.  don't need this node's info.
625                                  * continue without it. */
626                                 break;
627                         case DLM_RECO_NODE_DATA_REQUESTING:
628                                 ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
629                                 mlog(0, "now receiving recovery data from "
630                                      "node %u for dead node %u\n",
631                                      ndata->node_num, dead_node);
632                                 break;
633                         case DLM_RECO_NODE_DATA_RECEIVING:
634                                 mlog(0, "already receiving recovery data from "
635                                      "node %u for dead node %u\n",
636                                      ndata->node_num, dead_node);
637                                 break;
638                         case DLM_RECO_NODE_DATA_DONE:
639                                 mlog(0, "already DONE receiving recovery data "
640                                      "from node %u for dead node %u\n",
641                                      ndata->node_num, dead_node);
642                                 break;
643                 }
644         }
645
646         mlog(0, "done requesting all lock info\n");
647
648         /* nodes should be sending reco data now
649          * just need to wait */
650
651         while (1) {
652                 /* check all the nodes now to see if we are
653                  * done, or if anyone died */
654                 all_nodes_done = 1;
655                 spin_lock(&dlm_reco_state_lock);
656                 list_for_each(iter, &dlm->reco.node_data) {
657                         ndata = list_entry (iter, struct dlm_reco_node_data, list);
658
659                         mlog(0, "checking recovery state of node %u\n",
660                              ndata->node_num);
661                         switch (ndata->state) {
662                                 case DLM_RECO_NODE_DATA_INIT:
663                                 case DLM_RECO_NODE_DATA_REQUESTING:
664                                         mlog(ML_ERROR, "bad ndata state for "
665                                              "node %u: state=%d\n",
666                                              ndata->node_num, ndata->state);
667                                         BUG();
668                                         break;
669                                 case DLM_RECO_NODE_DATA_DEAD:
670                                         mlog(0, "node %u died after "
671                                              "requesting recovery info for "
672                                              "node %u\n", ndata->node_num,
673                                              dead_node);
674                                         break;
675                                 case DLM_RECO_NODE_DATA_RECEIVING:
676                                 case DLM_RECO_NODE_DATA_REQUESTED:
677                                         mlog(0, "%s: node %u still in state %s\n",
678                                              dlm->name, ndata->node_num,
679                                              ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
680                                              "receiving" : "requested");
681                                         all_nodes_done = 0;
682                                         break;
683                                 case DLM_RECO_NODE_DATA_DONE:
684                                         mlog(0, "%s: node %u state is done\n",
685                                              dlm->name, ndata->node_num);
686                                         break;
687                                 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
688                                         mlog(0, "%s: node %u state is finalize\n",
689                                              dlm->name, ndata->node_num);
690                                         break;
691                         }
692                 }
693                 spin_unlock(&dlm_reco_state_lock);
694
695                 mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
696                      all_nodes_done?"yes":"no");
697                 if (all_nodes_done) {
698                         int ret;
699
700                         /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
701                          * just send a finalize message to everyone and
702                          * clean up */
703                         mlog(0, "all nodes are done! send finalize\n");
704                         ret = dlm_send_finalize_reco_message(dlm);
705                         if (ret < 0)
706                                 mlog_errno(ret);
707
708                         spin_lock(&dlm->spinlock);
709                         dlm_finish_local_lockres_recovery(dlm, dead_node,
710                                                           dlm->node_num);
711                         spin_unlock(&dlm->spinlock);
712                         mlog(0, "should be done with recovery!\n");
713
714                         mlog(0, "finishing recovery of %s at %lu, "
715                              "dead=%u, this=%u, new=%u\n", dlm->name,
716                              jiffies, dlm->reco.dead_node,
717                              dlm->node_num, dlm->reco.new_master);
718                         destroy = 1;
719                         status = 0;
720                         /* rescan everything marked dirty along the way */
721                         dlm_kick_thread(dlm, NULL);
722                         break;
723                 }
724                 /* wait to be signalled, with periodic timeout
725                  * to check for node death */
726                 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
727                                          kthread_should_stop(),
728                                          msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
729
730         }
731
732         if (destroy)
733                 dlm_destroy_recovery_area(dlm, dead_node);
734
735         mlog_exit(status);
736         return status;
737 }
738
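/* Snapshot the current domain map and allocate a dlm_reco_node_data entry
 * for every live node.  The dead node is expected to have been removed
 * from the domain map already, which the BUG_ON below asserts. */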
739 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
740 {
741         int num=0;
742         struct dlm_reco_node_data *ndata;
743
744         spin_lock(&dlm->spinlock);
745         memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
746         /* nodes can only be removed (by dying) after dropping
747          * this lock, and death will be trapped later, so this should do */
748         spin_unlock(&dlm->spinlock);
749
750         while (1) {
751                 num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
752                 if (num >= O2NM_MAX_NODES) {
753                         break;
754                 }
755                 BUG_ON(num == dead_node);
756
757                 ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
758                 if (!ndata) {
759                         dlm_destroy_recovery_area(dlm, dead_node);
760                         return -ENOMEM;
761                 }
762                 ndata->node_num = num;
763                 ndata->state = DLM_RECO_NODE_DATA_INIT;
764                 spin_lock(&dlm_reco_state_lock);
765                 list_add_tail(&ndata->list, &dlm->reco.node_data);
766                 spin_unlock(&dlm_reco_state_lock);
767                 num++;
768         }
769
770         return 0;
771 }
772
773 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
774 {
775         struct list_head *iter, *iter2;
776         struct dlm_reco_node_data *ndata;
777         LIST_HEAD(tmplist);
778
779         spin_lock(&dlm_reco_state_lock);
780         list_splice_init(&dlm->reco.node_data, &tmplist);
781         spin_unlock(&dlm_reco_state_lock);
782
783         list_for_each_safe(iter, iter2, &tmplist) {
784                 ndata = list_entry (iter, struct dlm_reco_node_data, list);
785                 list_del_init(&ndata->list);
786                 kfree(ndata);
787         }
788 }
789
790 static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
791                                  u8 dead_node)
792 {
793         struct dlm_lock_request lr;
794         enum dlm_status ret;
795
796         mlog(0, "\n");
797
798
799         mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
800                   "to %u\n", dead_node, request_from);
801
802         memset(&lr, 0, sizeof(lr));
803         lr.node_idx = dlm->node_num;
804         lr.dead_node = dead_node;
805
806         // send message
807         ret = DLM_NOLOCKMGR;
808         ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
809                                  &lr, sizeof(lr), request_from, NULL);
810
811         /* negative status is handled by caller */
812         if (ret < 0)
813                 mlog_errno(ret);
814
815         // return from here, then
816         // sleep until all received or error
817         return ret;
818
819 }
820
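/* Handler for DLM_LOCK_REQUEST_MSG, sent by the recovery master.  Queues
 * dlm_request_all_locks_worker() with a page-sized scratch buffer that the
 * worker uses as its dlm_migratable_lockres while sending lock state back
 * to the master. */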
821 int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
822 {
823         struct dlm_ctxt *dlm = data;
824         struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
825         char *buf = NULL;
826         struct dlm_work_item *item = NULL;
827
828         if (!dlm_grab(dlm))
829                 return -EINVAL;
830
831         if (lr->dead_node != dlm->reco.dead_node) {
832                 mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
833                      "dead_node is %u\n", dlm->name, lr->node_idx,
834                      lr->dead_node, dlm->reco.dead_node);
835                 dlm_print_reco_node_status(dlm);
836                 /* this is a hack */
837                 dlm_put(dlm);
838                 return -ENOMEM;
839         }
840         BUG_ON(lr->dead_node != dlm->reco.dead_node);
841
842         item = kzalloc(sizeof(*item), GFP_NOFS);
843         if (!item) {
844                 dlm_put(dlm);
845                 return -ENOMEM;
846         }
847
848         /* this will get freed by dlm_request_all_locks_worker */
849         buf = (char *) __get_free_page(GFP_NOFS);
850         if (!buf) {
851                 kfree(item);
852                 dlm_put(dlm);
853                 return -ENOMEM;
854         }
855
856         /* queue up work for dlm_request_all_locks_worker */
857         dlm_grab(dlm);  /* get an extra ref for the work item */
858         dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
859         item->u.ral.reco_master = lr->node_idx;
860         item->u.ral.dead_node = lr->dead_node;
861         spin_lock(&dlm->work_lock);
862         list_add_tail(&item->list, &dlm->work_list);
863         spin_unlock(&dlm->work_lock);
864         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
865
866         dlm_put(dlm);
867         return 0;
868 }
869
870 static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
871 {
872         struct dlm_migratable_lockres *mres;
873         struct dlm_lock_resource *res;
874         struct dlm_ctxt *dlm;
875         LIST_HEAD(resources);
876         struct list_head *iter;
877         int ret;
878         u8 dead_node, reco_master;
879         int skip_all_done = 0;
880
881         dlm = item->dlm;
882         dead_node = item->u.ral.dead_node;
883         reco_master = item->u.ral.reco_master;
884         mres = (struct dlm_migratable_lockres *)data;
885
886         mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
887              dlm->name, dead_node, reco_master);
888
889         if (dead_node != dlm->reco.dead_node ||
890             reco_master != dlm->reco.new_master) {
891                 /* worker could have been created before the recovery master
892                  * died.  if so, do not continue, but do not error. */
893                 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
894                         mlog(ML_NOTICE, "%s: will not send recovery state, "
895                              "recovery master %u died, thread=(dead=%u,mas=%u)"
896                              " current=(dead=%u,mas=%u)\n", dlm->name,
897                              reco_master, dead_node, reco_master,
898                              dlm->reco.dead_node, dlm->reco.new_master);
899                 } else {
900                         mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
901                              "master=%u), request(dead=%u, master=%u)\n",
902                              dlm->name, dlm->reco.dead_node,
903                              dlm->reco.new_master, dead_node, reco_master);
904                 }
905                 goto leave;
906         }
907
908         /* lock resources should have already been moved to the
909          * dlm->reco.resources list.  now move items from that list
910          * to a temp list if the dead owner matches.  note that the
911          * whole cluster recovers only one node at a time, so we
912          * can safely move UNKNOWN lock resources for each recovery
913          * session. */
914         dlm_move_reco_locks_to_list(dlm, &resources, dead_node);
915
916         /* now we can begin blasting lockreses without the dlm lock */
917
918         /* any errors returned will be due to the new_master dying,
919          * the dlm_reco_thread should detect this */
920         list_for_each(iter, &resources) {
921                 res = list_entry (iter, struct dlm_lock_resource, recovering);
922                 ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
923                                         DLM_MRES_RECOVERY);
924                 if (ret < 0) {
925                         mlog(ML_ERROR, "%s: node %u went down while sending "
926                              "recovery state for dead node %u, ret=%d\n", dlm->name,
927                              reco_master, dead_node, ret);
928                         skip_all_done = 1;
929                         break;
930                 }
931         }
932
933         /* move the resources back to the list */
934         spin_lock(&dlm->spinlock);
935         list_splice_init(&resources, &dlm->reco.resources);
936         spin_unlock(&dlm->spinlock);
937
938         if (!skip_all_done) {
939                 ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
940                 if (ret < 0) {
941                         mlog(ML_ERROR, "%s: node %u went down while sending "
942                              "recovery all-done for dead node %u, ret=%d\n",
943                              dlm->name, reco_master, dead_node, ret);
944                 }
945         }
946 leave:
947         free_page((unsigned long)data);
948 }
949
950
951 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
952 {
953         int ret, tmpret;
954         struct dlm_reco_data_done done_msg;
955
956         memset(&done_msg, 0, sizeof(done_msg));
957         done_msg.node_idx = dlm->node_num;
958         done_msg.dead_node = dead_node;
959         mlog(0, "sending DATA DONE message to %u, "
960              "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
961              done_msg.dead_node);
962
963         ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
964                                  sizeof(done_msg), send_to, &tmpret);
965         if (ret < 0) {
966                 if (!dlm_is_host_down(ret)) {
967                         mlog_errno(ret);
968                         mlog(ML_ERROR, "%s: unknown error sending data-done "
969                              "to %u\n", dlm->name, send_to);
970                         BUG();
971                 }
972         } else
973                 ret = tmpret;
974         return ret;
975 }
976
977
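/* Handler for DLM_RECO_DATA_DONE_MSG, received by the recovery master when
 * a node has finished sending its lock state for the dead node.  Marks that
 * node's reco_node_data as DONE and kicks the recovery thread so it can
 * re-check whether all nodes have reported in. */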
978 int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
979 {
980         struct dlm_ctxt *dlm = data;
981         struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
982         struct list_head *iter;
983         struct dlm_reco_node_data *ndata = NULL;
984         int ret = -EINVAL;
985
986         if (!dlm_grab(dlm))
987                 return -EINVAL;
988
989         mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
990              "node_idx=%u, this node=%u\n", done->dead_node,
991              dlm->reco.dead_node, done->node_idx, dlm->node_num);
992
993         mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
994                         "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
995                         "node_idx=%u, this node=%u\n", done->dead_node,
996                         dlm->reco.dead_node, done->node_idx, dlm->node_num);
997
998         spin_lock(&dlm_reco_state_lock);
999         list_for_each(iter, &dlm->reco.node_data) {
1000                 ndata = list_entry (iter, struct dlm_reco_node_data, list);
1001                 if (ndata->node_num != done->node_idx)
1002                         continue;
1003
1004                 switch (ndata->state) {
1005                         /* should have moved beyond INIT but not to FINALIZE yet */
1006                         case DLM_RECO_NODE_DATA_INIT:
1007                         case DLM_RECO_NODE_DATA_DEAD:
1008                         case DLM_RECO_NODE_DATA_FINALIZE_SENT:
1009                                 mlog(ML_ERROR, "bad ndata state for node %u:"
1010                                      " state=%d\n", ndata->node_num,
1011                                      ndata->state);
1012                                 BUG();
1013                                 break;
1014                         /* these states are possible at this point, anywhere along
1015                          * the line of recovery */
1016                         case DLM_RECO_NODE_DATA_DONE:
1017                         case DLM_RECO_NODE_DATA_RECEIVING:
1018                         case DLM_RECO_NODE_DATA_REQUESTED:
1019                         case DLM_RECO_NODE_DATA_REQUESTING:
1020                                 mlog(0, "node %u is DONE sending "
1021                                           "recovery data!\n",
1022                                           ndata->node_num);
1023
1024                                 ndata->state = DLM_RECO_NODE_DATA_DONE;
1025                                 ret = 0;
1026                                 break;
1027                 }
1028         }
1029         spin_unlock(&dlm_reco_state_lock);
1030
1031         /* wake the recovery thread, some node is done */
1032         if (!ret)
1033                 dlm_kick_recovery_thread(dlm);
1034
1035         if (ret < 0)
1036                 mlog(ML_ERROR, "failed to find recovery node data for node "
1037                      "%u\n", done->node_idx);
1038         dlm_put(dlm);
1039
1040         mlog(0, "leaving reco data done handler, ret=%d\n", ret);
1041         return ret;
1042 }
1043
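/* Move every lock resource that must be sent to the recovery master
 * (owned by the dead node, or with an unknown owner) from
 * dlm->reco.resources onto the caller's temporary list.  Any $RECOVERY
 * lock still held by the dead node is dropped here so it cannot stall a
 * later recovery. */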
1044 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
1045                                         struct list_head *list,
1046                                         u8 dead_node)
1047 {
1048         struct dlm_lock_resource *res;
1049         struct list_head *iter, *iter2;
1050         struct dlm_lock *lock;
1051
1052         spin_lock(&dlm->spinlock);
1053         list_for_each_safe(iter, iter2, &dlm->reco.resources) {
1054                 res = list_entry (iter, struct dlm_lock_resource, recovering);
1055                 /* always prune any $RECOVERY entries for dead nodes,
1056                  * otherwise hangs can occur during later recovery */
1057                 if (dlm_is_recovery_lock(res->lockname.name,
1058                                          res->lockname.len)) {
1059                         spin_lock(&res->spinlock);
1060                         list_for_each_entry(lock, &res->granted, list) {
1061                                 if (lock->ml.node == dead_node) {
1062                                         mlog(0, "AHA! there was "
1063                                              "a $RECOVERY lock for dead "
1064                                              "node %u (%s)!\n", 
1065                                              dead_node, dlm->name);
1066                                         list_del_init(&lock->list);
1067                                         dlm_lock_put(lock);
1068                                         break;
1069                                 }
1070                         }
1071                         spin_unlock(&res->spinlock);
1072                         continue;
1073                 }
1074
1075                 if (res->owner == dead_node) {
1076                         mlog(0, "found lockres owned by dead node while "
1077                                   "doing recovery for node %u. sending it.\n",
1078                                   dead_node);
1079                         list_move_tail(&res->recovering, list);
1080                 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
1081                         mlog(0, "found UNKNOWN owner while doing recovery "
1082                                   "for node %u. sending it.\n", dead_node);
1083                         list_move_tail(&res->recovering, list);
1084                 }
1085         }
1086         spin_unlock(&dlm->spinlock);
1087 }
1088
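/* Count the locks on all three queues.  The queue++ walk below relies on
 * the granted, converting and blocked list_heads being laid out back to
 * back in struct dlm_lock_resource. */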
1089 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
1090 {
1091         int total_locks = 0;
1092         struct list_head *iter, *queue = &res->granted;
1093         int i;
1094
1095         for (i=0; i<3; i++) {
1096                 list_for_each(iter, queue)
1097                         total_locks++;
1098                 queue++;
1099         }
1100         return total_locks;
1101 }
1102
1103
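/* Flush the locks accumulated in mres to send_to.  DLM_MRES_ALL_DONE is
 * added when this batch covers the last lock of the resource; afterwards
 * the message buffer is reinitialized so the caller can keep packing. */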
1104 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
1105                                       struct dlm_migratable_lockres *mres,
1106                                       u8 send_to,
1107                                       struct dlm_lock_resource *res,
1108                                       int total_locks)
1109 {
1110         u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
1111         int mres_total_locks = be32_to_cpu(mres->total_locks);
1112         int sz, ret = 0, status = 0;
1113         u8 orig_flags = mres->flags,
1114            orig_master = mres->master;
1115
1116         BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
1117         if (!mres->num_locks)
1118                 return 0;
1119
1120         sz = sizeof(struct dlm_migratable_lockres) +
1121                 (mres->num_locks * sizeof(struct dlm_migratable_lock));
1122
1123         /* add an all-done flag if we reached the last lock */
1124         orig_flags = mres->flags;
1125         BUG_ON(total_locks > mres_total_locks);
1126         if (total_locks == mres_total_locks)
1127                 mres->flags |= DLM_MRES_ALL_DONE;
1128
1129         mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
1130              dlm->name, res->lockname.len, res->lockname.name,
1131              orig_flags & DLM_MRES_MIGRATION ? "migrate" : "recovery",
1132              send_to);
1133
1134         /* send it */
1135         ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
1136                                  sz, send_to, &status);
1137         if (ret < 0) {
1138                 /* XXX: negative status is not handled.
1139                  * this will end up killing this node. */
1140                 mlog_errno(ret);
1141         } else {
1142                 /* might get an -ENOMEM back here */
1143                 ret = status;
1144                 if (ret < 0) {
1145                         mlog_errno(ret);
1146
1147                         if (ret == -EFAULT) {
1148                                 mlog(ML_ERROR, "node %u told me to kill "
1149                                      "myself!\n", send_to);
1150                                 BUG();
1151                         }
1152                 }
1153         }
1154
1155         /* zero and reinit the message buffer */
1156         dlm_init_migratable_lockres(mres, res->lockname.name,
1157                                     res->lockname.len, mres_total_locks,
1158                                     mig_cookie, orig_flags, orig_master);
1159         return ret;
1160 }
1161
1162 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
1163                                         const char *lockname, int namelen,
1164                                         int total_locks, u64 cookie,
1165                                         u8 flags, u8 master)
1166 {
1167         /* mres here is one full page */
1168         memset(mres, 0, PAGE_SIZE);
1169         mres->lockname_len = namelen;
1170         memcpy(mres->lockname, lockname, namelen);
1171         mres->num_locks = 0;
1172         mres->total_locks = cpu_to_be32(total_locks);
1173         mres->mig_cookie = cpu_to_be64(cookie);
1174         mres->flags = flags;
1175         mres->master = master;
1176 }
1177
1178
1179 /* returns 1 if this lock fills the network structure,
1180  * 0 otherwise */
1181 static int dlm_add_lock_to_array(struct dlm_lock *lock,
1182                                  struct dlm_migratable_lockres *mres, int queue)
1183 {
1184         struct dlm_migratable_lock *ml;
1185         int lock_num = mres->num_locks;
1186
1187         ml = &(mres->ml[lock_num]);
1188         ml->cookie = lock->ml.cookie;
1189         ml->type = lock->ml.type;
1190         ml->convert_type = lock->ml.convert_type;
1191         ml->highest_blocked = lock->ml.highest_blocked;
1192         ml->list = queue;
1193         if (lock->lksb) {
1194                 ml->flags = lock->lksb->flags;
1195                 /* send our current lvb */
1196                 if (ml->type == LKM_EXMODE ||
1197                     ml->type == LKM_PRMODE) {
1198                         /* if it is already set, this had better be a PR
1199                          * and it has to match */
1200                         if (!dlm_lvb_is_empty(mres->lvb) &&
1201                             (ml->type == LKM_EXMODE ||
1202                              memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
1203                                 mlog(ML_ERROR, "mismatched lvbs!\n");
1204                                 __dlm_print_one_lock_resource(lock->lockres);
1205                                 BUG();
1206                         }
1207                         memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1208                 }
1209         }
1210         ml->node = lock->ml.node;
1211         mres->num_locks++;
1212         /* we reached the max, send this network message */
1213         if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
1214                 return 1;
1215         return 0;
1216 }
1217
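/* A lockres with no locks at all is still represented by a "dummy" lock
 * (zero cookie, all IVMODE, on the blocked list) so that the receiving
 * node takes a mastery reference; dlm_is_dummy_lock() recognizes it on
 * the other end. */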
1218 static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
1219                                struct dlm_migratable_lockres *mres)
1220 {
1221         struct dlm_lock dummy;
1222         memset(&dummy, 0, sizeof(dummy));
1223         dummy.ml.cookie = 0;
1224         dummy.ml.type = LKM_IVMODE;
1225         dummy.ml.convert_type = LKM_IVMODE;
1226         dummy.ml.highest_blocked = LKM_IVMODE;
1227         dummy.lksb = NULL;
1228         dummy.ml.node = dlm->node_num;
1229         dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
1230 }
1231
1232 static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
1233                                     struct dlm_migratable_lock *ml,
1234                                     u8 *nodenum)
1235 {
1236         if (unlikely(ml->cookie == 0 &&
1237             ml->type == LKM_IVMODE &&
1238             ml->convert_type == LKM_IVMODE &&
1239             ml->highest_blocked == LKM_IVMODE &&
1240             ml->list == DLM_BLOCKED_LIST)) {
1241                 *nodenum = ml->node;
1242                 return 1;
1243         }
1244         return 0;
1245 }
1246
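/* Pack every lock on the resource into page-sized dlm_migratable_lockres
 * messages and send them to send_to, flushing whenever the array fills.
 * If the resource has no locks, a single dummy lock is sent instead so
 * the receiver still takes a mastery reference. */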
1247 int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1248                          struct dlm_migratable_lockres *mres,
1249                          u8 send_to, u8 flags)
1250 {
1251         struct list_head *queue, *iter;
1252         int total_locks, i;
1253         u64 mig_cookie = 0;
1254         struct dlm_lock *lock;
1255         int ret = 0;
1256
1257         BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1258
1259         mlog(0, "sending to %u\n", send_to);
1260
1261         total_locks = dlm_num_locks_in_lockres(res);
1262         if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
1263                 /* rare, but possible */
1264                 mlog(0, "argh.  lockres has %d locks.  this will "
1265                           "require more than one network packet to "
1266                           "migrate\n", total_locks);
1267                 mig_cookie = dlm_get_next_mig_cookie();
1268         }
1269
1270         dlm_init_migratable_lockres(mres, res->lockname.name,
1271                                     res->lockname.len, total_locks,
1272                                     mig_cookie, flags, res->owner);
1273
1274         total_locks = 0;
1275         for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
1276                 queue = dlm_list_idx_to_ptr(res, i);
1277                 list_for_each(iter, queue) {
1278                         lock = list_entry (iter, struct dlm_lock, list);
1279
1280                         /* add another lock. */
1281                         total_locks++;
1282                         if (!dlm_add_lock_to_array(lock, mres, i))
1283                                 continue;
1284
1285                         /* this filled the lock message,
1286                          * we must send it immediately. */
1287                         ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
1288                                                        res, total_locks);
1289                         if (ret < 0)
1290                                 goto error;
1291                 }
1292         }
1293         if (total_locks == 0) {
1294                 /* send a dummy lock to indicate a mastery reference only */
1295                 mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
1296                      dlm->name, res->lockname.len, res->lockname.name,
1297                      send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
1298                      "migration");
1299                 dlm_add_dummy_lock(dlm, mres);
1300         }
1301         /* flush any remaining locks */
1302         ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
1303         if (ret < 0)
1304                 goto error;
1305         return ret;
1306
1307 error:
1308         mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
1309              dlm->name, ret);
1310         if (!dlm_is_host_down(ret))
1311                 BUG();
1312         mlog(0, "%s: node %u went down while sending %s "
1313              "lockres %.*s\n", dlm->name, send_to,
1314              flags & DLM_MRES_RECOVERY ?  "recovery" : "migration",
1315              res->lockname.len, res->lockname.name);
1316         return ret;
1317 }
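
/*
 * Illustrative sketch (not from the original call sites): one way a
 * migration-side caller might drive dlm_send_one_lockres(), assuming the
 * usual pattern of handing it a page-sized scratch buffer that it fills
 * and flushes as needed.  The helper name is hypothetical and the block is
 * wrapped in #if 0 so it is never compiled.
 */
#if 0
static int example_migrate_one_lockres(struct dlm_ctxt *dlm,
                                        struct dlm_lock_resource *res,
                                        u8 target)
{
        struct dlm_migratable_lockres *mres;
        int ret;

        /* the message never exceeds one page of recovery data (see the
         * note before dlm_mig_lockres_handler below) */
        mres = (struct dlm_migratable_lockres *)__get_free_page(GFP_NOFS);
        if (!mres)
                return -ENOMEM;

        ret = dlm_send_one_lockres(dlm, res, mres, target, DLM_MRES_MIGRATION);
        free_page((unsigned long)mres);
        return ret;
}
#endif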
1318
1319
1320
1321 /*
1322  * this message will contain no more than one page worth of
1323  * recovery data, and it will work on only one lockres.
1324  * there may be many locks in this page, and we may need to wait
1325  * for additional packets to complete all the locks (rare, but
1326  * possible).
1327  */
1328 /*
1329  * NOTE: the allocation error cases here are scary
1330  * we really cannot afford to fail an alloc in recovery
1331  * do we spin?  returning an error only delays the problem really
1332  */
1333
1334 int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
1335 {
1336         struct dlm_ctxt *dlm = data;
1337         struct dlm_migratable_lockres *mres =
1338                 (struct dlm_migratable_lockres *)msg->buf;
1339         int ret = 0;
1340         u8 real_master;
1341         char *buf = NULL;
1342         struct dlm_work_item *item = NULL;
1343         struct dlm_lock_resource *res = NULL;
1344
1345         if (!dlm_grab(dlm))
1346                 return -EINVAL;
1347
1348         BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1349
1350         real_master = mres->master;
1351         if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1352                 /* cannot migrate a lockres with no master */
1353                 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1354         }
1355
1356         mlog(0, "%s message received from node %u\n",
1357                   (mres->flags & DLM_MRES_RECOVERY) ?
1358                   "recovery" : "migration", mres->master);
1359         if (mres->flags & DLM_MRES_ALL_DONE)
1360                 mlog(0, "all done flag.  all lockres data received!\n");
1361
1362         ret = -ENOMEM;
1363         buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
1364         item = kzalloc(sizeof(*item), GFP_NOFS);
1365         if (!buf || !item)
1366                 goto leave;
1367
1368         /* look up the lockres to see if we already have a secondary
1369          * queue for it...  just add the locks in and this will have its owner
1370          * and RECOVERY flag changed when it completes. */
1371         res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
1372         if (res) {
1373                 /* this will get a ref on res */
1374                 /* mark it as recovering/migrating and hash it */
1375                 spin_lock(&res->spinlock);
1376                 if (mres->flags & DLM_MRES_RECOVERY) {
1377                         res->state |= DLM_LOCK_RES_RECOVERING;
1378                 } else {
1379                         if (res->state & DLM_LOCK_RES_MIGRATING) {
1380                                 /* this is at least the second
1381                                  * lockres message */
1382                                 mlog(0, "lock %.*s is already migrating\n",
1383                                           mres->lockname_len,
1384                                           mres->lockname);
1385                         } else if (res->state & DLM_LOCK_RES_RECOVERING) {
1386                                 /* caller should BUG */
1387                                 mlog(ML_ERROR, "node is attempting to migrate "
1388                                      "lock %.*s, but marked as recovering!\n",
1389                                      mres->lockname_len, mres->lockname);
1390                                 ret = -EFAULT;
1391                                 spin_unlock(&res->spinlock);
1392                                 goto leave;
1393                         }
1394                         res->state |= DLM_LOCK_RES_MIGRATING;
1395                 }
1396                 spin_unlock(&res->spinlock);
1397         } else {
1398                 /* need to allocate, just like if it was
1399                  * mastered here normally  */
1400                 res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
1401                 if (!res)
1402                         goto leave;
1403
1404                 /* to match the ref that we would have gotten if
1405                  * dlm_lookup_lockres had succeeded */
1406                 dlm_lockres_get(res);
1407
1408                 /* mark it as recovering/migrating and hash it */
1409                 if (mres->flags & DLM_MRES_RECOVERY)
1410                         res->state |= DLM_LOCK_RES_RECOVERING;
1411                 else
1412                         res->state |= DLM_LOCK_RES_MIGRATING;
1413
1414                 spin_lock(&dlm->spinlock);
1415                 __dlm_insert_lockres(dlm, res);
1416                 spin_unlock(&dlm->spinlock);
1417
1418                 /* now that the new lockres is inserted,
1419                  * make it usable by other processes */
1420                 spin_lock(&res->spinlock);
1421                 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1422                 spin_unlock(&res->spinlock);
1423                 wake_up(&res->wq);
1424
1425                 /* add an extra ref for the just-allocated lockres;
1426                  * otherwise the lockres will be purged immediately */
1427                 dlm_lockres_get(res);
1428         }
1429
1430         /* at this point we have allocated everything we need,
1431          * and we have a hashed lockres with an extra ref and
1432          * the proper res->state flags. */
1433         ret = 0;
1434         spin_lock(&res->spinlock);
1435         /* drop this either when master requery finds a different master
1436          * or when a lock is added by the recovery worker */
1437         dlm_lockres_grab_inflight_ref(dlm, res);
1438         if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1439                 /* migration cannot have an unknown master */
1440                 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1441                 mlog(0, "recovery has passed me a lockres with an "
1442                           "unknown owner.. will need to requery: "
1443                           "%.*s\n", mres->lockname_len, mres->lockname);
1444         } else {
1445                 /* take a reference now to pin the lockres, drop it
1446                  * when locks are added in the worker */
1447                 dlm_change_lockres_owner(dlm, res, dlm->node_num);
1448         }
1449         spin_unlock(&res->spinlock);
1450
1451         /* queue up work for dlm_mig_lockres_worker */
1452         dlm_grab(dlm);  /* get an extra ref for the work item */
1453         memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
1454         dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
1455         item->u.ml.lockres = res; /* already have a ref */
1456         item->u.ml.real_master = real_master;
1457         spin_lock(&dlm->work_lock);
1458         list_add_tail(&item->list, &dlm->work_list);
1459         spin_unlock(&dlm->work_lock);
1460         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
1461
1462 leave:
1463         dlm_put(dlm);
1464         if (ret < 0) {
1465                 if (buf)
1466                         kfree(buf);
1467                 if (item)
1468                         kfree(item);
1469         }
1470
1471         mlog_exit(ret);
1472         return ret;
1473 }
1474
1475
1476 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
1477 {
1478         struct dlm_ctxt *dlm;
1479         struct dlm_migratable_lockres *mres;
1480         int ret = 0;
1481         struct dlm_lock_resource *res;
1482         u8 real_master;
1483
1484         dlm = item->dlm;
1485         mres = (struct dlm_migratable_lockres *)data;
1486
1487         res = item->u.ml.lockres;
1488         real_master = item->u.ml.real_master;
1489
1490         if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1491                 /* this case is super-rare. only occurs if
1492                  * node death happens during migration. */
1493 again:
1494                 ret = dlm_lockres_master_requery(dlm, res, &real_master);
1495                 if (ret < 0) {
1496                         mlog(0, "dlm_lockres_master_requery ret=%d\n",
1497                                   ret);
1498                         goto again;
1499                 }
1500                 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1501                         mlog(0, "lockres %.*s not claimed.  "
1502                                    "this node will take it.\n",
1503                                    res->lockname.len, res->lockname.name);
1504                 } else {
1505                         spin_lock(&res->spinlock);
1506                         dlm_lockres_drop_inflight_ref(dlm, res);
1507                         spin_unlock(&res->spinlock);
1508                         mlog(0, "master needs to respond to sender "
1509                                   "that node %u still owns %.*s\n",
1510                                   real_master, res->lockname.len,
1511                                   res->lockname.name);
1512                         /* cannot touch this lockres */
1513                         goto leave;
1514                 }
1515         }
1516
1517         ret = dlm_process_recovery_data(dlm, res, mres);
1518         if (ret < 0)
1519                 mlog(0, "dlm_process_recovery_data returned  %d\n", ret);
1520         else
1521                 mlog(0, "dlm_process_recovery_data succeeded\n");
1522
1523         if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
1524                            (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
1525                 ret = dlm_finish_migration(dlm, res, mres->master);
1526                 if (ret < 0)
1527                         mlog_errno(ret);
1528         }
1529
1530 leave:
1531         kfree(data);
1532         mlog_exit(ret);
1533 }
1534
1535
1536
1537 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
1538                                       struct dlm_lock_resource *res,
1539                                       u8 *real_master)
1540 {
1541         struct dlm_node_iter iter;
1542         int nodenum;
1543         int ret = 0;
1544
1545         *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
1546
1547         /* we only reach here if one of the two nodes in a
1548          * migration died while the migration was in progress.
1549          * at this point we need to requery the master.  we
1550          * know that the new_master got as far as creating
1551          * an mle on at least one node, but we do not know
1552          * if any nodes had actually cleared the mle and set
1553          * the master to the new_master.  the old master
1554          * is supposed to set the owner to UNKNOWN in the
1555          * event of a new_master death, so the only possible
1556          * responses that we can get from nodes here are
1557          * that the master is new_master, or that the master
1558          * is UNKNOWN.
1559          * if all nodes come back with UNKNOWN then we know
1560          * the lock needs remastering here.
1561          * if any node comes back with a valid master, check
1562          * to see if that master is the one that we are
1563          * recovering.  if so, then the new_master died and
1564          * we need to remaster this lock.  if not, then the
1565          * new_master survived and that node will respond to
1566          * other nodes about the owner.
1567          * if there is an owner, this node needs to dump this
1568          * lockres and alert the sender that this lockres
1569          * was rejected. */
1570         spin_lock(&dlm->spinlock);
1571         dlm_node_iter_init(dlm->domain_map, &iter);
1572         spin_unlock(&dlm->spinlock);
1573
1574         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
1575                 /* do not send to self */
1576                 if (nodenum == dlm->node_num)
1577                         continue;
1578                 ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
1579                 if (ret < 0) {
1580                         mlog_errno(ret);
1581                         if (!dlm_is_host_down(ret))
1582                                 BUG();
1583                         /* host is down, so answer for that node would be
1584                          * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
1585                 }
1586                 if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1587                         mlog(0, "lock master is %u\n", *real_master);
1588                         break;
1589                 }
1590         }
1591         return ret;
1592 }
1593
1594
1595 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1596                           u8 nodenum, u8 *real_master)
1597 {
1598         int ret = -EINVAL;
1599         struct dlm_master_requery req;
1600         int status = DLM_LOCK_RES_OWNER_UNKNOWN;
1601
1602         memset(&req, 0, sizeof(req));
1603         req.node_idx = dlm->node_num;
1604         req.namelen = res->lockname.len;
1605         memcpy(req.name, res->lockname.name, res->lockname.len);
1606
1607         ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
1608                                  &req, sizeof(req), nodenum, &status);
1609         /* XXX: negative status not handled properly here. */
1610         if (ret < 0)
1611                 mlog_errno(ret);
1612         else {
1613                 BUG_ON(status < 0);
1614                 BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
1615                 *real_master = (u8) (status & 0xff);
1616                 mlog(0, "node %u responded to master requery with %u\n",
1617                           nodenum, *real_master);
1618                 ret = 0;
1619         }
1620         return ret;
1621 }
1622
1623
1624 /* this function cannot error, so unless the sending
1625  * or receiving of the message failed, the owner can
1626  * be trusted */
1627 int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
1628 {
1629         struct dlm_ctxt *dlm = data;
1630         struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
1631         struct dlm_lock_resource *res = NULL;
1632         unsigned int hash;
1633         int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1634         u32 flags = DLM_ASSERT_MASTER_REQUERY;
1635
1636         if (!dlm_grab(dlm)) {
1637                 /* since the domain has gone away on this
1638                  * node, the proper response is UNKNOWN */
1639                 return master;
1640         }
1641
1642         hash = dlm_lockid_hash(req->name, req->namelen);
1643
1644         spin_lock(&dlm->spinlock);
1645         res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
1646         if (res) {
1647                 spin_lock(&res->spinlock);
1648                 master = res->owner;
1649                 if (master == dlm->node_num) {
1650                         int ret = dlm_dispatch_assert_master(dlm, res,
1651                                                              0, 0, flags);
1652                         if (ret < 0) {
1653                                 mlog_errno(-ENOMEM);
1654                                 /* retry!? */
1655                                 BUG();
1656                         }
1657                 }
1658                 spin_unlock(&res->spinlock);
1659         }
1660         spin_unlock(&dlm->spinlock);
1661
1662         dlm_put(dlm);
1663         return master;
1664 }
1665
1666 static inline struct list_head *
1667 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
1668 {
1669         struct list_head *ret;
1670         BUG_ON(list_num < 0);
1671         BUG_ON(list_num > 2);
1672         ret = &(res->granted);
1673         ret += list_num;
1674         return ret;
1675 }
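
/* Note: the pointer arithmetic above relies on res->granted, res->converting
 * and res->blocked being laid out consecutively in struct dlm_lock_resource,
 * in the same order as DLM_GRANTED_LIST, DLM_CONVERTING_LIST and
 * DLM_BLOCKED_LIST. */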
1676 /* TODO: do ast flush business
1677  * TODO: do MIGRATING and RECOVERING spinning
1678  */
1679
1680 /*
1681  * NOTE about in-flight requests during migration:
1682  *
1683  * Before attempting the migrate, the master has marked the lockres as
1684  * MIGRATING and then flushed all of its pending ASTs.  So any in-flight
1685  * requests either got queued before the MIGRATING flag got set, in which
1686  * case the lock data will reflect the change and a return message is on
1687  * the way, or the request failed to get in before MIGRATING got set.  In
1688  * this case, the caller will be told to spin and wait for the MIGRATING
1689  * flag to be dropped, then recheck the master.
1690  * This holds true for the convert, cancel and unlock cases, and since lvb
1691  * updates are tied to these same messages, it applies to lvb updates as
1692  * well.  For the lock case, there is no way a lock can be on the master's
1693  * queue and not be on the secondary queue, since the lock is always added
1694  * locally first.  This means that the new target node will never be sent
1695  * a lock that it does not already have on its list.
1696  * In total, this means that the local lock is correct and should not be
1697  * updated to match the one sent by the master.  Any messages sent back
1698  * from the master before the MIGRATING flag will bring the lock properly
1699  * up-to-date, and the change will be ordered properly for the waiter.
1700  * We will *not* attempt to modify the lock underneath the waiter.
1701  */
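
/*
 * Concrete example of the rule above: suppose a convert request races with
 * the migration.  If the convert message reached the master before MIGRATING
 * was set, the master's reply is already in flight and will bring the local
 * lock up to date; if it did not, the caller is told to spin until MIGRATING
 * clears and then re-resolve the master.  Either way the copy processed
 * below is never more current than the local lock, which is why the local
 * state is trusted as-is.
 */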
1702
1703 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1704                                      struct dlm_lock_resource *res,
1705                                      struct dlm_migratable_lockres *mres)
1706 {
1707         struct dlm_migratable_lock *ml;
1708         struct list_head *queue;
1709         struct list_head *tmpq = NULL;
1710         struct dlm_lock *newlock = NULL;
1711         struct dlm_lockstatus *lksb = NULL;
1712         int ret = 0;
1713         int i, j, bad;
1714         struct list_head *iter;
1715         struct dlm_lock *lock = NULL;
1716         u8 from = O2NM_MAX_NODES;
1717         unsigned int added = 0;
1718
1719         mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1720         for (i=0; i<mres->num_locks; i++) {
1721                 ml = &(mres->ml[i]);
1722
1723                 if (dlm_is_dummy_lock(dlm, ml, &from)) {
1724                         /* placeholder, just need to set the refmap bit */
1725                         BUG_ON(mres->num_locks != 1);
1726                         mlog(0, "%s:%.*s: dummy lock for %u\n",
1727                              dlm->name, mres->lockname_len, mres->lockname,
1728                              from);
1729                         spin_lock(&res->spinlock);
1730                         dlm_lockres_set_refmap_bit(from, res);
1731                         spin_unlock(&res->spinlock);
1732                         added++;
1733                         break;
1734                 }
1735                 BUG_ON(ml->highest_blocked != LKM_IVMODE);
1736                 newlock = NULL;
1737                 lksb = NULL;
1738
1739                 queue = dlm_list_num_to_pointer(res, ml->list);
1740                 tmpq = NULL;
1741
1742                 /* if the lock is for the local node it needs to
1743                  * be moved to the proper location within the queue.
1744                  * do not allocate a new lock structure. */
1745                 if (ml->node == dlm->node_num) {
1746                         /* MIGRATION ONLY! */
1747                         BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
1748
1749                         spin_lock(&res->spinlock);
1750                         for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
1751                                 tmpq = dlm_list_idx_to_ptr(res, j);
1752                                 list_for_each(iter, tmpq) {
1753                                         lock = list_entry (iter, struct dlm_lock, list);
1754                                         if (lock->ml.cookie != ml->cookie)
1755                                                 lock = NULL;
1756                                         else
1757                                                 break;
1758                                 }
1759                                 if (lock)
1760                                         break;
1761                         }
1762
1763                         /* lock is always created locally first, and
1764                          * destroyed locally last.  it must be on the list */
1765                         if (!lock) {
1766                                 u64 c = ml->cookie;
1767                                 mlog(ML_ERROR, "could not find local lock "
1768                                                "with cookie %u:%llu!\n",
1769                                                dlm_get_lock_cookie_node(c),
1770                                                dlm_get_lock_cookie_seq(c));
1771                                 __dlm_print_one_lock_resource(res);
1772                                 BUG();
1773                         }
1774                         BUG_ON(lock->ml.node != ml->node);
1775
1776                         if (tmpq != queue) {
1777                                 mlog(0, "lock was on %u instead of %u for %.*s\n",
1778                                      j, ml->list, res->lockname.len, res->lockname.name);
1779                                 spin_unlock(&res->spinlock);
1780                                 continue;
1781                         }
1782
1783                         /* see NOTE above about why we do not update
1784                          * to match the master here */
1785
1786                         /* move the lock to its proper place */
1787                         /* do not alter lock refcount.  switching lists. */
1788                         list_move_tail(&lock->list, queue);
1789                         spin_unlock(&res->spinlock);
1790                         added++;
1791
1792                         mlog(0, "just reordered a local lock!\n");
1793                         continue;
1794                 }
1795
1796                 /* lock is for another node. */
1797                 newlock = dlm_new_lock(ml->type, ml->node,
1798                                        be64_to_cpu(ml->cookie), NULL);
1799                 if (!newlock) {
1800                         ret = -ENOMEM;
1801                         goto leave;
1802                 }
1803                 lksb = newlock->lksb;
1804                 dlm_lock_attach_lockres(newlock, res);
1805
1806                 if (ml->convert_type != LKM_IVMODE) {
1807                         BUG_ON(queue != &res->converting);
1808                         newlock->ml.convert_type = ml->convert_type;
1809                 }
1810                 lksb->flags |= (ml->flags &
1811                                 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
1812
1813                 if (ml->type == LKM_NLMODE)
1814                         goto skip_lvb;
1815
1816                 if (!dlm_lvb_is_empty(mres->lvb)) {
1817                         if (lksb->flags & DLM_LKSB_PUT_LVB) {
1818                                 /* other node was trying to update
1819                                  * lvb when node died.  recreate the
1820                                  * lksb with the updated lvb. */
1821                                 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
1822                                 /* the lock resource lvb update must happen
1823                                  * NOW, before the spinlock is dropped.
1824                                  * we no longer wait for the AST to update
1825                                  * the lvb. */
1826                                 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1827                         } else {
1828                                 /* otherwise, the node is sending its 
1829                                  * most recent valid lvb info */
1830                                 BUG_ON(ml->type != LKM_EXMODE &&
1831                                        ml->type != LKM_PRMODE);
1832                                 if (!dlm_lvb_is_empty(res->lvb) &&
1833                                     (ml->type == LKM_EXMODE ||
1834                                      memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1835                                         int i;
1836                                         mlog(ML_ERROR, "%s:%.*s: received bad "
1837                                              "lvb! type=%d\n", dlm->name,
1838                                              res->lockname.len,
1839                                              res->lockname.name, ml->type);
1840                                         printk("lockres lvb=[");
1841                                         for (i=0; i<DLM_LVB_LEN; i++)
1842                                                 printk("%02x", res->lvb[i]);
1843                                         printk("]\nmigrated lvb=[");
1844                                         for (i=0; i<DLM_LVB_LEN; i++)
1845                                                 printk("%02x", mres->lvb[i]);
1846                                         printk("]\n");
1847                                         dlm_print_one_lock_resource(res);
1848                                         BUG();
1849                                 }
1850                                 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1851                         }
1852                 }
1853 skip_lvb:
1854
1855                 /* NOTE:
1856                  * wrt lock queue ordering and recovery:
1857                  *    1. order of locks on granted queue is
1858                  *       meaningless.
1859                  *    2. order of locks on converting queue is
1860                  *       LOST with the node death.  sorry charlie.
1861                  *    3. order of locks on the blocked queue is
1862                  *       also LOST.
1863                  * order of locks does not affect integrity, it
1864                  * just means that a lock request may get pushed
1865                  * back in line as a result of the node death.
1866                  * also note that for a given node the lock order
1867                  * for its secondary queue locks is preserved
1868                  * relative to each other, but clearly *not*
1869                  * preserved relative to locks from other nodes.
1870                  */
1871                 bad = 0;
1872                 spin_lock(&res->spinlock);
1873                 list_for_each_entry(lock, queue, list) {
1874                         if (lock->ml.cookie == ml->cookie) {
1875                                 u64 c = lock->ml.cookie;
1876                                 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1877                                      "exists on this lockres!\n", dlm->name,
1878                                      res->lockname.len, res->lockname.name,
1879                                      dlm_get_lock_cookie_node(c),
1880                                      dlm_get_lock_cookie_seq(c));
1881
1882                                 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
1883                                      "node=%u, cookie=%u:%llu, queue=%d\n",
1884                                      ml->type, ml->convert_type, ml->node,
1885                                      dlm_get_lock_cookie_node(ml->cookie),
1886                                      dlm_get_lock_cookie_seq(ml->cookie),
1887                                      ml->list);
1888
1889                                 __dlm_print_one_lock_resource(res);
1890                                 bad = 1;
1891                                 break;
1892                         }
1893                 }
1894                 if (!bad) {
1895                         dlm_lock_get(newlock);
1896                         list_add_tail(&newlock->list, queue);
1897                         mlog(0, "%s:%.*s: added lock for node %u, "
1898                              "setting refmap bit\n", dlm->name,
1899                              res->lockname.len, res->lockname.name, ml->node);
1900                         dlm_lockres_set_refmap_bit(ml->node, res);
1901                         added++;
1902                 }
1903                 spin_unlock(&res->spinlock);
1904         }
1905         mlog(0, "done running all the locks\n");
1906
1907 leave:
1908         /* balance the ref taken when the work was queued */
1909         spin_lock(&res->spinlock);
1910         dlm_lockres_drop_inflight_ref(dlm, res);
1911         spin_unlock(&res->spinlock);
1912
1913         if (ret < 0) {
1914                 mlog_errno(ret);
1915                 if (newlock)
1916                         dlm_lock_put(newlock);
1917         }
1918
1919         mlog_exit(ret);
1920         return ret;
1921 }
1922
1923 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
1924                                        struct dlm_lock_resource *res)
1925 {
1926         int i;
1927         struct list_head *queue, *iter, *iter2;
1928         struct dlm_lock *lock;
1929
1930         res->state |= DLM_LOCK_RES_RECOVERING;
1931         if (!list_empty(&res->recovering)) {
1932                 mlog(0,
1933                      "Recovering res %s:%.*s, is already on recovery list!\n",
1934                      dlm->name, res->lockname.len, res->lockname.name);
1935                 list_del_init(&res->recovering);
1936         }
1937         /* We need to hold a reference while on the recovery list */
1938         dlm_lockres_get(res);
1939         list_add_tail(&res->recovering, &dlm->reco.resources);
1940
1941         /* find any pending locks and put them back on proper list */
1942         for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
1943                 queue = dlm_list_idx_to_ptr(res, i);
1944                 list_for_each_safe(iter, iter2, queue) {
1945                         lock = list_entry (iter, struct dlm_lock, list);
1946                         dlm_lock_get(lock);
1947                         if (lock->convert_pending) {
1948                                 /* move converting lock back to granted */
1949                                 BUG_ON(i != DLM_CONVERTING_LIST);
1950                                 mlog(0, "node died with convert pending "
1951                                      "on %.*s. move back to granted list.\n",
1952                                      res->lockname.len, res->lockname.name);
1953                                 dlm_revert_pending_convert(res, lock);
1954                                 lock->convert_pending = 0;
1955                         } else if (lock->lock_pending) {
1956                                 /* remove pending lock requests completely */
1957                                 BUG_ON(i != DLM_BLOCKED_LIST);
1958                                 mlog(0, "node died with lock pending "
1959                                      "on %.*s. remove from blocked list and skip.\n",
1960                                      res->lockname.len, res->lockname.name);
1961                                 /* lock will be floating until ref in
1962                                  * dlmlock_remote is freed after the network
1963                                  * call returns.  ok for it to not be on any
1964                                  * list since no ast can be called
1965                                  * (the master is dead). */
1966                                 dlm_revert_pending_lock(res, lock);
1967                                 lock->lock_pending = 0;
1968                         } else if (lock->unlock_pending) {
1969                                 /* if an unlock was in progress, treat as
1970                                  * if this had completed successfully
1971                                  * before sending this lock state to the
1972                                  * new master.  note that the dlm_unlock
1973                                  * call is still responsible for calling
1974                                  * the unlockast.  that will happen after
1975                                  * the network call times out.  for now,
1976                                  * just move lists to prepare the new
1977                                  * recovery master.  */
1978                                 BUG_ON(i != DLM_GRANTED_LIST);
1979                                 mlog(0, "node died with unlock pending "
1980                                      "on %.*s. remove from granted list and skip.\n",
1981                                      res->lockname.len, res->lockname.name);
1982                                 dlm_commit_pending_unlock(res, lock);
1983                                 lock->unlock_pending = 0;
1984                         } else if (lock->cancel_pending) {
1985                                 /* if a cancel was in progress, treat as
1986                                  * if this had completed successfully
1987                                  * before sending this lock state to the
1988                                  * new master */
1989                                 BUG_ON(i != DLM_CONVERTING_LIST);
1990                                 mlog(0, "node died with cancel pending "
1991                                      "on %.*s. move back to granted list.\n",
1992                                      res->lockname.len, res->lockname.name);
1993                                 dlm_commit_pending_cancel(res, lock);
1994                                 lock->cancel_pending = 0;
1995                         }
1996                         dlm_lock_put(lock);
1997                 }
1998         }
1999 }
2000
2001
2002
2003 /* removes all recovered locks from the recovery list.
2004  * sets the res->owner to the new master.
2005  * unsets the RECOVERY flag and wakes waiters. */
2006 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2007                                               u8 dead_node, u8 new_master)
2008 {
2009         int i;
2010         struct list_head *iter, *iter2;
2011         struct hlist_node *hash_iter;
2012         struct hlist_head *bucket;
2013
2014         struct dlm_lock_resource *res;
2015
2016         mlog_entry_void();
2017
2018         assert_spin_locked(&dlm->spinlock);
2019
2020         list_for_each_safe(iter, iter2, &dlm->reco.resources) {
2021                 res = list_entry (iter, struct dlm_lock_resource, recovering);
2022                 if (res->owner == dead_node) {
2023                         list_del_init(&res->recovering);
2024                         spin_lock(&res->spinlock);
2025                         /* new_master has our reference from
2026                          * the lock state sent during recovery */
2027                         dlm_change_lockres_owner(dlm, res, new_master);
2028                         res->state &= ~DLM_LOCK_RES_RECOVERING;
2029                         if (__dlm_lockres_has_locks(res))
2030                                 __dlm_dirty_lockres(dlm, res);
2031                         spin_unlock(&res->spinlock);
2032                         wake_up(&res->wq);
2033                         dlm_lockres_put(res);
2034                 }
2035         }
2036
2037         /* this will become unnecessary eventually, but
2038          * for now we need to run the whole hash, clear
2039          * the RECOVERING state and set the owner
2040          * if necessary */
2041         for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2042                 bucket = dlm_lockres_hash(dlm, i);
2043                 hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
2044                         if (res->state & DLM_LOCK_RES_RECOVERING) {
2045                                 if (res->owner == dead_node) {
2046                                         mlog(0, "(this=%u) res %.*s owner=%u "
2047                                              "was not on recovering list, but "
2048                                              "clearing state anyway\n",
2049                                              dlm->node_num, res->lockname.len,
2050                                              res->lockname.name, new_master);
2051                                 } else if (res->owner == dlm->node_num) {
2052                                         mlog(0, "(this=%u) res %.*s owner=%u "
2053                                              "was not on recovering list, "
2054                                              "owner is THIS node, clearing\n",
2055                                              dlm->node_num, res->lockname.len,
2056                                              res->lockname.name, new_master);
2057                                 } else
2058                                         continue;
2059
2060                                 if (!list_empty(&res->recovering)) {
2061                                         mlog(0, "%s:%.*s: lockres was "
2062                                              "marked RECOVERING, owner=%u\n",
2063                                              dlm->name, res->lockname.len,
2064                                              res->lockname.name, res->owner);
2065                                         list_del_init(&res->recovering);
2066                                         dlm_lockres_put(res);
2067                                 }
2068                                 spin_lock(&res->spinlock);
2069                                 /* new_master has our reference from
2070                                  * the lock state sent during recovery */
2071                                 dlm_change_lockres_owner(dlm, res, new_master);
2072                                 res->state &= ~DLM_LOCK_RES_RECOVERING;
2073                                 if (__dlm_lockres_has_locks(res))
2074                                         __dlm_dirty_lockres(dlm, res);
2075                                 spin_unlock(&res->spinlock);
2076                                 wake_up(&res->wq);
2077                         }
2078                 }
2079         }
2080 }
2081
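/* Whether a given lock forces the lvb to be blanked: when checking the dead
 * node's locks on a lockres this node masters (local == 0), only an EX held
 * at death could have been modifying the lvb; when checking this node's own
 * locks on a secondary copy (local == 1), anything below PR means the cached
 * lvb can no longer be trusted. */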
2082 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
2083 {
2084         if (local) {
2085                 if (lock->ml.type != LKM_EXMODE &&
2086                     lock->ml.type != LKM_PRMODE)
2087                         return 1;
2088         } else if (lock->ml.type == LKM_EXMODE)
2089                 return 1;
2090         return 0;
2091 }
2092
2093 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
2094                                struct dlm_lock_resource *res, u8 dead_node)
2095 {
2096         struct list_head *iter, *queue;
2097         struct dlm_lock *lock;
2098         int blank_lvb = 0, local = 0;
2099         int i;
2100         u8 search_node;
2101
2102         assert_spin_locked(&dlm->spinlock);
2103         assert_spin_locked(&res->spinlock);
2104
2105         if (res->owner == dlm->node_num)
2106                 /* if this node owned the lockres, and if the dead node 
2107                  * had an EX when he died, blank out the lvb */
2108                 search_node = dead_node;
2109         else {
2110                 /* if this is a secondary lockres, and we had no EX or PR
2111                  * locks granted, we can no longer trust the lvb */
2112                 search_node = dlm->node_num;
2113                 local = 1;  /* check local state for valid lvb */
2114         }
2115
2116         for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
2117                 queue = dlm_list_idx_to_ptr(res, i);
2118                 list_for_each(iter, queue) {
2119                         lock = list_entry (iter, struct dlm_lock, list);
2120                         if (lock->ml.node == search_node) {
2121                                 if (dlm_lvb_needs_invalidation(lock, local)) {
2122                                         /* zero the lksb lvb and lockres lvb */
2123                                         blank_lvb = 1;
2124                                         memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
2125                                 }
2126                         }
2127                 }
2128         }
2129
2130         if (blank_lvb) {
2131                 mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
2132                      res->lockname.len, res->lockname.name, dead_node);
2133                 memset(res->lvb, 0, DLM_LVB_LEN);
2134         }
2135 }
2136
2137 static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2138                                 struct dlm_lock_resource *res, u8 dead_node)
2139 {
2140         struct list_head *iter, *tmpiter;
2141         struct dlm_lock *lock;
2142         unsigned int freed = 0;
2143
2144         /* this node is the lockres master:
2145          * 1) remove any stale locks for the dead node
2146          * 2) if the dead node had an EX when he died, blank out the lvb 
2147          */
2148         assert_spin_locked(&dlm->spinlock);
2149         assert_spin_locked(&res->spinlock);
2150
2151         /* TODO: check pending_asts, pending_basts here */
2152         list_for_each_safe(iter, tmpiter, &res->granted) {
2153                 lock = list_entry (iter, struct dlm_lock, list);
2154                 if (lock->ml.node == dead_node) {
2155                         list_del_init(&lock->list);
2156                         dlm_lock_put(lock);
2157                         freed++;
2158                 }
2159         }
2160         list_for_each_safe(iter, tmpiter, &res->converting) {
2161                 lock = list_entry (iter, struct dlm_lock, list);
2162                 if (lock->ml.node == dead_node) {
2163                         list_del_init(&lock->list);
2164                         dlm_lock_put(lock);
2165                         freed++;
2166                 }
2167         }
2168         list_for_each_safe(iter, tmpiter, &res->blocked) {
2169                 lock = list_entry (iter, struct dlm_lock, list);
2170                 if (lock->ml.node == dead_node) {
2171                         list_del_init(&lock->list);
2172                         dlm_lock_put(lock);
2173                         freed++;
2174                 }
2175         }
2176
2177         if (freed) {
2178                 mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
2179                      "dropping ref from lockres\n", dlm->name,
2180                      res->lockname.len, res->lockname.name, freed, dead_node);
2181                 BUG_ON(!test_bit(dead_node, res->refmap));
2182                 dlm_lockres_clear_refmap_bit(dead_node, res);
2183         } else if (test_bit(dead_node, res->refmap)) {
2184                 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2185                      "no locks and had not purged before dying\n", dlm->name,
2186                      res->lockname.len, res->lockname.name, dead_node);
2187                 dlm_lockres_clear_refmap_bit(dead_node, res);
2188         }
2189
2190         /* do not kick thread yet */
2191         __dlm_dirty_lockres(dlm, res);
2192 }
2193
2194 /* if this node is the recovery master, and there are no
2195  * locks for a given lockres owned by this node that are in
2196  * either PR or EX mode, zero out the lvb before requesting.
2197  *
2198  */
2199
2200
2201 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2202 {
2203         struct hlist_node *iter;
2204         struct dlm_lock_resource *res;
2205         int i;
2206         struct hlist_head *bucket;
2207         struct dlm_lock *lock;
2208
2209
2210         /* purge any stale mles */
2211         dlm_clean_master_list(dlm, dead_node);
2212
2213         /*
2214          * now clean up all lock resources.  there are two rules:
2215          *
2216          * 1) if the dead node was the master, move the lockres
2217          *    to the recovering list.  set the RECOVERING flag.
2218          *    this lockres needs to be cleaned up before it can
2219          *    be used further.
2220          *
2221          * 2) if this node was the master, remove all locks from
2222          *    each of the lockres queues that were owned by the
2223          *    dead node.  once recovery finishes, the dlm thread
2224          *    can be kicked again to see if any ASTs or BASTs
2225          *    need to be fired as a result.
2226          */
2227         for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2228                 bucket = dlm_lockres_hash(dlm, i);
2229                 hlist_for_each_entry(res, iter, bucket, hash_node) {
2230                         /* always prune any $RECOVERY entries for dead nodes,
2231                          * otherwise hangs can occur during later recovery */
2232                         if (dlm_is_recovery_lock(res->lockname.name,
2233                                                  res->lockname.len)) {
2234                                 spin_lock(&res->spinlock);
2235                                 list_for_each_entry(lock, &res->granted, list) {
2236                                         if (lock->ml.node == dead_node) {
2237                                                 mlog(0, "AHA! there was "
2238                                                      "a $RECOVERY lock for dead "
2239                                                      "node %u (%s)!\n",
2240                                                      dead_node, dlm->name);
2241                                                 list_del_init(&lock->list);
2242                                                 dlm_lock_put(lock);
2243                                                 break;
2244                                         }
2245                                 }
2246                                 spin_unlock(&res->spinlock);
2247                                 continue;
2248                         }                       
2249                         spin_lock(&res->spinlock);
2250                         /* zero the lvb if necessary */
2251                         dlm_revalidate_lvb(dlm, res, dead_node);
2252                         if (res->owner == dead_node) {
2253                                 if (res->state & DLM_LOCK_RES_DROPPING_REF)
2254                                         mlog(0, "%s:%.*s: owned by "
2255                                              "dead node %u, this node was "
2256                                              "dropping its ref when it died. "
2257                                              "continue, dropping the flag.\n",
2258                                              dlm->name, res->lockname.len,
2259                                              res->lockname.name, dead_node);
2260
2261                                 /* the wake_up for this will happen when the
2262                                  * RECOVERING flag is dropped later */
2263                                 res->state &= ~DLM_LOCK_RES_DROPPING_REF;
2264
2265                                 dlm_move_lockres_to_recovery_list(dlm, res);
2266                         } else if (res->owner == dlm->node_num) {
2267                                 dlm_free_dead_locks(dlm, res, dead_node);
2268                                 __dlm_lockres_calc_usage(dlm, res);
2269                         }
2270                         spin_unlock(&res->spinlock);
2271                 }
2272         }
2273
2274 }
2275
2276 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
2277 {
2278         assert_spin_locked(&dlm->spinlock);
2279
2280         if (dlm->reco.new_master == idx) {
2281                 mlog(0, "%s: recovery master %d just died\n",
2282                      dlm->name, idx);
2283                 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2284                         /* finalize1 was reached, so it is safe to clear
2285                          * the new_master and dead_node.  that recovery
2286                          * is complete. */
2287                         mlog(0, "%s: dead master %d had reached "
2288                              "finalize1 state, clearing\n", dlm->name, idx);
2289                         dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2290                         __dlm_reset_recovery(dlm);
2291                 }
2292         }
2293
2294         /* check to see if the node is already considered dead */
2295         if (!test_bit(idx, dlm->live_nodes_map)) {
2296                 mlog(0, "for domain %s, node %d is already dead. "
2297                      "another node likely did recovery already.\n",
2298                      dlm->name, idx);
2299                 return;
2300         }
2301
2302         /* check to see if we do not care about this node */
2303         if (!test_bit(idx, dlm->domain_map)) {
2304                 /* This also catches the case that we get a node down
2305                  * but haven't joined the domain yet. */
2306                 mlog(0, "node %u already removed from domain!\n", idx);
2307                 return;
2308         }
2309
2310         clear_bit(idx, dlm->live_nodes_map);
2311
2312         /* Clean up join state on node death. */
2313         if (dlm->joining_node == idx) {
2314                 mlog(0, "Clearing join state for node %u\n", idx);
2315                 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
2316         }
2317
2318         /* make sure local cleanup occurs before the heartbeat events */
2319         if (!test_bit(idx, dlm->recovery_map))
2320                 dlm_do_local_recovery_cleanup(dlm, idx);
2321
2322         /* notify anything attached to the heartbeat events */
2323         dlm_hb_event_notify_attached(dlm, idx, 0);
2324
2325         mlog(0, "node %u being removed from domain map!\n", idx);
2326         clear_bit(idx, dlm->domain_map);
2327         /* wake up migration waiters if a node goes down.
2328          * perhaps later we can genericize this for other waiters. */
2329         wake_up(&dlm->migration_wq);
2330
2331         if (test_bit(idx, dlm->recovery_map))
2332                 mlog(0, "domain %s, node %u already added "
2333                      "to recovery map!\n", dlm->name, idx);
2334         else
2335                 set_bit(idx, dlm->recovery_map);
2336 }
2337
2338 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
2339 {
2340         struct dlm_ctxt *dlm = data;
2341
2342         if (!dlm_grab(dlm))
2343                 return;
2344
2345         spin_lock(&dlm->spinlock);
2346         __dlm_hb_node_down(dlm, idx);
2347         spin_unlock(&dlm->spinlock);
2348
2349         dlm_put(dlm);
2350 }
2351
2352 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
2353 {
2354         struct dlm_ctxt *dlm = data;
2355
2356         if (!dlm_grab(dlm))
2357                 return;
2358
2359         spin_lock(&dlm->spinlock);
2360         set_bit(idx, dlm->live_nodes_map);
2361         /* do NOT notify mles attached to the heartbeat events.
2362          * new nodes are of no interest to mastery until they have joined. */
2363         spin_unlock(&dlm->spinlock);
2364
2365         dlm_put(dlm);
2366 }
2367
2368 static void dlm_reco_ast(void *astdata)
2369 {
2370         struct dlm_ctxt *dlm = astdata;
2371         mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
2372              dlm->node_num, dlm->name);
2373 }
2374 static void dlm_reco_bast(void *astdata, int blocked_type)
2375 {
2376         struct dlm_ctxt *dlm = astdata;
2377         mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
2378              dlm->node_num, dlm->name);
2379 }
2380 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2381 {
2382         mlog(0, "unlockast for recovery lock fired!\n");
2383 }
2384
2385 /*
2386  * dlm_pick_recovery_master will continually attempt to use
2387  * dlmlock() on the special "$RECOVERY" lockres with the
2388  * LKM_NOQUEUE flag to get an EX.  every thread that enters
2389  * this function on each node racing to become the recovery
2390  * master will not stop attempting this until either:
2391  * a) this node gets the EX (and becomes the recovery master),
2392  * or b) dlm->reco.new_master gets set to some nodenum 
2393  * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2394  * so each time a recovery master is needed, the entire cluster
2395  * will sync at this point.  if the new master dies, that will
2396  * be detected in dlm_do_recovery */
2397 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2398 {
2399         enum dlm_status ret;
2400         struct dlm_lockstatus lksb;
2401         int status = -EINVAL;
2402
2403         mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2404              dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2405 again:  
2406         memset(&lksb, 0, sizeof(lksb));
2407
2408         ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
2409                       DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
2410                       dlm_reco_ast, dlm, dlm_reco_bast);
2411
2412         mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
2413              dlm->name, ret, lksb.status);
2414
2415         if (ret == DLM_NORMAL) {
2416                 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2417                      dlm->name, dlm->node_num);
2418                 
2419                 /* got the EX lock.  check to see if another node 
2420                  * just became the reco master */
2421                 if (dlm_reco_master_ready(dlm)) {
2422                         mlog(0, "%s: got reco EX lock, but %u will "
2423                              "do the recovery\n", dlm->name,
2424                              dlm->reco.new_master);
2425                         status = -EEXIST;
2426                 } else {
2427                         status = 0;
2428
2429                         /* see if recovery was already finished elsewhere */
2430                         spin_lock(&dlm->spinlock);
2431                         if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2432                                 status = -EINVAL;       
2433                                 mlog(0, "%s: got reco EX lock, but "
2434                                      "node got recovered already\n", dlm->name);
2435                                 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2436                                         mlog(ML_ERROR, "%s: new master is %u "
2437                                              "but no dead node!\n", 
2438                                              dlm->name, dlm->reco.new_master);
2439                                         BUG();
2440                                 }
2441                         }
2442                         spin_unlock(&dlm->spinlock);
2443                 }
2444
2445                 /* if this node has actually become the recovery master,
2446                  * set the master and send the messages to begin recovery */
2447                 if (!status) {
2448                         mlog(0, "%s: dead=%u, this=%u, sending "
2449                              "begin_reco now\n", dlm->name, 
2450                              dlm->reco.dead_node, dlm->node_num);
2451                         status = dlm_send_begin_reco_message(dlm,
2452                                       dlm->reco.dead_node);
2453                         /* this always succeeds */
2454                         BUG_ON(status);
2455
2456                         /* set the new_master to this node */
2457                         spin_lock(&dlm->spinlock);
2458                         dlm_set_reco_master(dlm, dlm->node_num);
2459                         spin_unlock(&dlm->spinlock);
2460                 }
2461
2462                 /* recovery lock is a special case.  ast will not get fired,
2463                  * so just go ahead and unlock it. */
2464                 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
2465                 if (ret == DLM_DENIED) {
2466                         mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
2467                         ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
2468                 }
2469                 if (ret != DLM_NORMAL) {
2470                         /* this would really suck. this could only happen
2471                          * if there was a network error during the unlock
2472                          * because of node death.  this means the unlock
2473                          * is actually "done" and the lock structure is
2474                          * even freed.  we can continue, but only
2475                          * because this specific lock name is special. */
2476                         mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
2477                 }
2478         } else if (ret == DLM_NOTQUEUED) {
2479                 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2480                      dlm->name, dlm->node_num);
2481                 /* another node is master. wait on
2482                  * reco.new_master != O2NM_INVALID_NODE_NUM 
2483                  * for at most one second */
2484                 wait_event_timeout(dlm->dlm_reco_thread_wq,
2485                                          dlm_reco_master_ready(dlm),
2486                                          msecs_to_jiffies(1000));
2487                 if (!dlm_reco_master_ready(dlm)) {
2488                         mlog(0, "%s: reco master taking a while\n",
2489                              dlm->name);
2490                         goto again;
2491                 }
2492                 /* another node has informed this one that it is reco master */
2493                 mlog(0, "%s: reco master %u is ready to recover %u\n",
2494                      dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
2495                 status = -EEXIST;
2496         } else if (ret == DLM_RECOVERING) {
2497                 mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
2498                      dlm->name, dlm->node_num);
2499                 goto again;
2500         } else {
2501                 struct dlm_lock_resource *res;
2502
2503                 /* dlmlock returned something other than NOTQUEUED or NORMAL */
2504                 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
2505                      "lksb.status=%s\n", dlm->name, dlm_errname(ret),
2506                      dlm_errname(lksb.status));
2507                 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2508                                          DLM_RECOVERY_LOCK_NAME_LEN);
2509                 if (res) {
2510                         dlm_print_one_lock_resource(res);
2511                         dlm_lockres_put(res);
2512                 } else {
2513                         mlog(ML_ERROR, "recovery lock not found\n");
2514                 }
2515                 BUG();
2516         }
2517
2518         return status;
2519 }
2520
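/* ask every other live node in the domain to begin recovery of dead_node
 * by sending DLM_BEGIN_RECO_MSG.  the dead node itself is skipped, network
 * errors are retried forever, and a node that answers EAGAIN because it is
 * still finalizing a previous recovery is retried after a short sleep. */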
2521 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2522 {
2523         struct dlm_begin_reco br;
2524         int ret = 0;
2525         struct dlm_node_iter iter;
2526         int nodenum;
2527         int status;
2528
2529         mlog_entry("%u\n", dead_node);
2530
2531         mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
2532
2533         spin_lock(&dlm->spinlock);
2534         dlm_node_iter_init(dlm->domain_map, &iter);
2535         spin_unlock(&dlm->spinlock);
2536
2537         clear_bit(dead_node, iter.node_map);
2538
2539         memset(&br, 0, sizeof(br));
2540         br.node_idx = dlm->node_num;
2541         br.dead_node = dead_node;
2542
2543         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2544                 ret = 0;
2545                 if (nodenum == dead_node) {
2546                         mlog(0, "not sending begin reco to dead node "
2547                                   "%u\n", dead_node);
2548                         continue;
2549                 }
2550                 if (nodenum == dlm->node_num) {
2551                         mlog(0, "not sending begin reco to self\n");
2552                         continue;
2553                 }
2554 retry:
2555                 ret = -EINVAL;
2556                 mlog(0, "attempting to send begin reco msg to %d\n",
2557                           nodenum);
2558                 ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
2559                                          &br, sizeof(br), nodenum, &status);
2560                 /* a negative status from the remote handler is dealt with just below */
2561                 if (ret >= 0)
2562                         ret = status;
2563                 if (dlm_is_host_down(ret)) {
2564                         /* node is down.  not involved in recovery
2565                          * so just keep going */
2566                         mlog(0, "%s: node %u was down when sending "
2567                              "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2568                         ret = 0;
2569                 }
2570                 if (ret < 0) {
2571                         struct dlm_lock_resource *res;
2572                         /* this is now a serious problem, possibly ENOMEM 
2573                          * in the network stack.  must retry */
2574                         mlog_errno(ret);
2575                         mlog(ML_ERROR, "begin reco of dlm %s to node %u "
2576                             "returned %d\n", dlm->name, nodenum, ret);
2577                         res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2578                                                  DLM_RECOVERY_LOCK_NAME_LEN);
2579                         if (res) {
2580                                 dlm_print_one_lock_resource(res);
2581                                 dlm_lockres_put(res);
2582                         } else {
2583                                 mlog(ML_ERROR, "recovery lock not found\n");
2584                         }
2585                         /* sleep for a bit in hopes that we can avoid 
2586                          * another ENOMEM */
2587                         msleep(100);
2588                         goto retry;
2589                 } else if (ret == EAGAIN) {
2590                         mlog(0, "%s: trying to start recovery of node "
2591                              "%u, but node %u is waiting for last recovery "
2592                              "to complete, backoff for a bit\n", dlm->name,
2593                              dead_node, nodenum);
2594                         /* TODO Look into replacing msleep with cond_resched() */
2595                         msleep(100);
2596                         goto retry;
2597                 }
2598         }
2599
2600         return ret;
2601 }
2602
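/* handler for DLM_BEGIN_RECO_MSG.  returns EAGAIN while this node is still
 * in the finalize stage of an earlier recovery so that the sender backs off.
 * otherwise it records the sender as the new recovery master, records the
 * dead node, marks that node down in the local bitmaps if heartbeat has not
 * yet noticed, and kicks the local recovery thread. */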
2603 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2604 {
2605         struct dlm_ctxt *dlm = data;
2606         struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
2607
2608         /* ok to return 0, domain has gone away */
2609         if (!dlm_grab(dlm))
2610                 return 0;
2611
2612         spin_lock(&dlm->spinlock);
2613         if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2614                 mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
2615                      "but this node is in finalize state, waiting on finalize2\n",
2616                      dlm->name, br->node_idx, br->dead_node,
2617                      dlm->reco.dead_node, dlm->reco.new_master);
2618                 spin_unlock(&dlm->spinlock);
2619                 return EAGAIN;
2620         }
2621         spin_unlock(&dlm->spinlock);
2622
2623         mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
2624              dlm->name, br->node_idx, br->dead_node,
2625              dlm->reco.dead_node, dlm->reco.new_master);
2626
2627         dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
2628
2629         spin_lock(&dlm->spinlock);
2630         if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2631                 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
2632                         mlog(0, "%s: new_master %u died, changing "
2633                              "to %u\n", dlm->name, dlm->reco.new_master,
2634                              br->node_idx);
2635                 } else {
2636                         mlog(0, "%s: new_master %u NOT DEAD, changing "
2637                              "to %u\n", dlm->name, dlm->reco.new_master,
2638                              br->node_idx);
2639                         /* may not have seen the new master as dead yet */
2640                 }
2641         }
2642         if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2643                 mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2644                      "node %u changing it to %u\n", dlm->name, 
2645                      dlm->reco.dead_node, br->node_idx, br->dead_node);
2646         }
2647         dlm_set_reco_master(dlm, br->node_idx);
2648         dlm_set_reco_dead_node(dlm, br->dead_node);
2649         if (!test_bit(br->dead_node, dlm->recovery_map)) {
2650                 mlog(0, "recovery master %u sees %u as dead, but this "
2651                      "node has not yet.  marking %u as dead\n",
2652                      br->node_idx, br->dead_node, br->dead_node);
2653                 if (!test_bit(br->dead_node, dlm->domain_map) ||
2654                     !test_bit(br->dead_node, dlm->live_nodes_map))
2655                         mlog(0, "%u not in domain/live_nodes map "
2656                              "so setting it in reco map manually\n",
2657                              br->dead_node);
2658                 /* force the recovery cleanup in __dlm_hb_node_down
2659                  * both of these will be cleared in a moment */
2660                 set_bit(br->dead_node, dlm->domain_map);
2661                 set_bit(br->dead_node, dlm->live_nodes_map);
2662                 __dlm_hb_node_down(dlm, br->dead_node);
2663         }
2664         spin_unlock(&dlm->spinlock);
2665
2666         dlm_kick_recovery_thread(dlm);
2667
2668         mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
2669              dlm->name, br->node_idx, br->dead_node,
2670              dlm->reco.dead_node, dlm->reco.new_master);
2671
2672         dlm_put(dlm);
2673         return 0;
2674 }
2675
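/* the recovery master sends DLM_FINALIZE_RECO_MSG to every other live node
 * twice: first with no flags, then with DLM_FINALIZE_STAGE2 set, so that all
 * nodes have processed stage 1 before any of them is told to drop its
 * recovery state in stage 2.  a node that dies here no longer affects the
 * current recovery, so host-down errors are simply skipped. */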
2676 #define DLM_FINALIZE_STAGE2  0x01
2677 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2678 {
2679         int ret = 0;
2680         struct dlm_finalize_reco fr;
2681         struct dlm_node_iter iter;
2682         int nodenum;
2683         int status;
2684         int stage = 1;
2685
2686         mlog(0, "finishing recovery for node %s:%u, "
2687              "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
2688
2689         spin_lock(&dlm->spinlock);
2690         dlm_node_iter_init(dlm->domain_map, &iter);
2691         spin_unlock(&dlm->spinlock);
2692
2693 stage2:
2694         memset(&fr, 0, sizeof(fr));
2695         fr.node_idx = dlm->node_num;
2696         fr.dead_node = dlm->reco.dead_node;
2697         if (stage == 2)
2698                 fr.flags |= DLM_FINALIZE_STAGE2;
2699
2700         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2701                 if (nodenum == dlm->node_num)
2702                         continue;
2703                 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
2704                                          &fr, sizeof(fr), nodenum, &status);
2705                 if (ret >= 0)
2706                         ret = status;
2707                 if (ret < 0) {
2708                         mlog_errno(ret);
2709                         if (dlm_is_host_down(ret)) {
2710                                 /* this has no effect on this recovery 
2711                                  * session, so set the status to zero to 
2712                                  * finish out the last recovery */
2713                                 mlog(ML_ERROR, "node %u went down after this "
2714                                      "node finished recovery.\n", nodenum);
2715                                 ret = 0;
2716                                 continue;
2717                         }
2718                         break;
2719                 }
2720         }
2721         if (stage == 1) {
2722                 /* reset the node_iter back to the top and send finalize2 */
2723                 iter.curnode = -1;
2724                 stage = 2;
2725                 goto stage2;
2726         }
2727
2728         return ret;
2729 }
2730
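/* handler for DLM_FINALIZE_RECO_MSG.  BUGs if the sender is not the recorded
 * recovery master or names a different dead node.  stage 1 runs
 * dlm_finish_local_lockres_recovery() and sets DLM_RECO_STATE_FINALIZE;
 * stage 2 clears that flag, resets the recovery state and wakes the
 * recovery thread. */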
2731 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2732 {
2733         struct dlm_ctxt *dlm = data;
2734         struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
2735         int stage = 1;
2736
2737         /* ok to return 0, domain has gone away */
2738         if (!dlm_grab(dlm))
2739                 return 0;
2740
2741         if (fr->flags & DLM_FINALIZE_STAGE2)
2742                 stage = 2;
2743
2744         mlog(0, "%s: node %u finalizing recovery stage%d of "
2745              "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
2746              fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
2747  
2748         spin_lock(&dlm->spinlock);
2749
2750         if (dlm->reco.new_master != fr->node_idx) {
2751                 mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
2752                      "%u is supposed to be the new master, dead=%u\n",
2753                      fr->node_idx, dlm->reco.new_master, fr->dead_node);
2754                 BUG();
2755         }
2756         if (dlm->reco.dead_node != fr->dead_node) {
2757                 mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
2758                      "node %u, but node %u is supposed to be dead\n",
2759                      fr->node_idx, fr->dead_node, dlm->reco.dead_node);
2760                 BUG();
2761         }
2762
2763         switch (stage) {
2764                 case 1:
2765                         dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
2766                         if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2767                                 mlog(ML_ERROR, "%s: received finalize1 from "
2768                                      "new master %u for dead node %u, but "
2769                                      "this node has already received it!\n",
2770                                      dlm->name, fr->node_idx, fr->dead_node);
2771                                 dlm_print_reco_node_status(dlm);
2772                                 BUG();
2773                         }
2774                         dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
2775                         spin_unlock(&dlm->spinlock);
2776                         break;
2777                 case 2:
2778                         if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
2779                                 mlog(ML_ERROR, "%s: received finalize2 from "
2780                                      "new master %u for dead node %u, but "
2781                                      "this node did not have finalize1!\n",
2782                                      dlm->name, fr->node_idx, fr->dead_node);
2783                                 dlm_print_reco_node_status(dlm);
2784                                 BUG();
2785                         }
2786                         dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2787                         spin_unlock(&dlm->spinlock);
2788                         dlm_reset_recovery(dlm);
2789                         dlm_kick_recovery_thread(dlm);
2790                         break;
2791                 default:
2792                         BUG();
2793         }
2794
2795         mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
2796              dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
2797
2798         dlm_put(dlm);
2799         return 0;
2800 }
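Why the finalize handshake needs two stages can be seen by pairing dlm_finalize_reco_handler() with dlm_begin_reco_handler() above: while DLM_RECO_STATE_FINALIZE is set (between stage 1 and stage 2), a begin-reco request is answered with EAGAIN and the sender backs off and retries. The user-space sketch below is not part of dlmrecovery.c; struct node_state, begin_reco(), finalize_stage1() and finalize_stage2() are invented stand-ins that only model that ordering.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* stand-in for the per-domain DLM_RECO_STATE_FINALIZE flag */
struct node_state {
	bool finalize_pending;
};

/* models dlm_begin_reco_handler(): refuse a new recovery while the
 * previous one sits between finalize stage 1 and stage 2 */
static int begin_reco(struct node_state *n)
{
	if (n->finalize_pending)
		return -1;	/* the real handler returns EAGAIN here */
	return 0;
}

/* models the two finalize messages from the recovery master */
static void finalize_stage1(struct node_state *n)
{
	n->finalize_pending = true;
}

static void finalize_stage2(struct node_state *n)
{
	n->finalize_pending = false;
}

int main(void)
{
	struct node_state n = { .finalize_pending = false };

	finalize_stage1(&n);
	assert(begin_reco(&n) != 0);	/* must back off and retry */
	finalize_stage2(&n);
	assert(begin_reco(&n) == 0);	/* new recovery may start */
	printf("two-stage finalize ordering holds\n");
	return 0;
}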