1 /******************************************************************************
2 *******************************************************************************
4 ** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
6 ** This copyrighted material is made available to anyone wishing to use,
7 ** modify, copy, or redistribute it subject to the terms and conditions
8 ** of the GNU General Public License v.2.
10 *******************************************************************************
11 ******************************************************************************/
13 /* Central locking logic has four stages:
33 Stage 1 (lock, unlock) is mainly about checking input args and
34 splitting into one of the four main operations:
36 dlm_lock = request_lock
37 dlm_lock+CONVERT = convert_lock
38 dlm_unlock = unlock_lock
39 dlm_unlock+CANCEL = cancel_lock
41 Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
42 provided to the next stage.
44 Stage 3, _xxxx_lock(), determines if the operation is local or remote.
45 When remote, it calls send_xxxx(), when local it calls do_xxxx().
47 Stage 4, do_xxxx(), is the guts of the operation. It manipulates the
48 given rsb and lkb and queues callbacks.
50 For remote operations, send_xxxx() results in the corresponding do_xxxx()
51 function being executed on the remote node. The connecting send/receive
52 calls on local (L) and remote (R) nodes:
54 L: send_xxxx() -> R: receive_xxxx()
56 L: receive_xxxx_reply() <- R: send_xxxx_reply()
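/*
 * Illustrative sketch only, not part of this file: a caller might drive
 * stage 1 roughly as below.  The lockspace handle, callback names and
 * error handling here are hypothetical; see dlm_lock() and dlm_unlock()
 * further down for the real entry points.
 *
 *	static struct dlm_lksb my_lksb;
 *
 *	static void my_ast(void *astarg)
 *	{
 *		// completion ast: my_lksb.sb_status holds the result
 *	}
 *
 *	static void my_bast(void *astarg, int mode)
 *	{
 *		// blocking ast: another node wants an incompatible mode
 *	}
 *
 *	static int example(dlm_lockspace_t *ls)
 *	{
 *		int error;
 *
 *		error = dlm_lock(ls, DLM_LOCK_EX, &my_lksb, 0, "myres", 5, 0,
 *				 my_ast, NULL, my_bast);
 *		if (error)
 *			return error;
 *
 *		// ... wait for my_ast, then use the resource ...
 *
 *		return dlm_unlock(ls, my_lksb.sb_lkid, 0, &my_lksb, NULL);
 *	}
 */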
59 #include "dlm_internal.h"
62 #include "requestqueue.h"
66 #include "lockspace.h"
71 #include "lvb_table.h"
74 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
75 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
76 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
77 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
78 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
79 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
80 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
81 static int send_remove(struct dlm_rsb *r);
82 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
83 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
84 struct dlm_message *ms);
85 static int receive_extralen(struct dlm_message *ms);
88 * Lock compatibility matrix - thanks Steve
89 * UN = Unlocked state. Not really a state, used as a flag
90 * PD = Padding. Used to make the matrix a nice power of two in size
91 * Other states are the same as the VMS DLM.
92 * Usage: matrix[grmode+1][rqmode+1] (although m[rq+1][gr+1] is the same)
95 static const int __dlm_compat_matrix[8][8] = {
96 /* UN NL CR CW PR PW EX PD */
97 {1, 1, 1, 1, 1, 1, 1, 0}, /* UN */
98 {1, 1, 1, 1, 1, 1, 1, 0}, /* NL */
99 {1, 1, 1, 1, 1, 1, 0, 0}, /* CR */
100 {1, 1, 1, 1, 0, 0, 0, 0}, /* CW */
101 {1, 1, 1, 0, 1, 0, 0, 0}, /* PR */
102 {1, 1, 1, 0, 0, 0, 0, 0}, /* PW */
103 {1, 1, 0, 0, 0, 0, 0, 0}, /* EX */
104 {0, 0, 0, 0, 0, 0, 0, 0} /* PD */
108 * This defines the direction of transfer of LVB data.
109 * Granted mode is the row; requested mode is the column.
110 * Usage: matrix[grmode+1][rqmode+1]
111 * 1 = LVB is returned to the caller
112 * 0 = LVB is written to the resource
113 * -1 = nothing happens to the LVB
116 const int dlm_lvb_operations[8][8] = {
117 /* UN NL CR CW PR PW EX PD*/
118 { -1, 1, 1, 1, 1, 1, 1, -1 }, /* UN */
119 { -1, 1, 1, 1, 1, 1, 1, 0 }, /* NL */
120 { -1, -1, 1, 1, 1, 1, 1, 0 }, /* CR */
121 { -1, -1, -1, 1, 1, 1, 1, 0 }, /* CW */
122 { -1, -1, -1, -1, 1, 1, 1, 0 }, /* PR */
123 { -1, 0, 0, 0, 0, 0, 1, 0 }, /* PW */
124 { -1, 0, 0, 0, 0, 0, 0, 0 }, /* EX */
125 { -1, 0, 0, 0, 0, 0, 0, 0 } /* PD */
127 EXPORT_SYMBOL_GPL(dlm_lvb_operations);
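/*
 * Example of reading the table above (illustrative only):
 *
 *	dlm_lvb_operations[DLM_LOCK_PR + 1][DLM_LOCK_EX + 1] == 1
 *		(PR->EX: the resource's LVB is returned to the caller)
 *	dlm_lvb_operations[DLM_LOCK_EX + 1][DLM_LOCK_NL + 1] == 0
 *		(EX->NL: the caller's LVB is written to the resource)
 *
 * set_lvb_lock() below applies this table on the master.
 */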
129 #define modes_compat(gr, rq) \
130 __dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
132 int dlm_modes_compat(int mode1, int mode2)
134 return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
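/*
 * Example (illustrative only): PR is compatible with CR and PR but not
 * with EX, so
 *
 *	dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_PR) == 1
 *	dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_EX) == 0
 *
 * modes_compat(gr, rq) makes the same check using the granted mode of
 * one lkb and the requested mode of another.
 */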
138 * Compatibility matrix for conversions with QUECVT set.
139 * Granted mode is the row; requested mode is the column.
140 * Usage: matrix[grmode+1][rqmode+1]
143 static const int __quecvt_compat_matrix[8][8] = {
144 /* UN NL CR CW PR PW EX PD */
145 {0, 0, 0, 0, 0, 0, 0, 0}, /* UN */
146 {0, 0, 1, 1, 1, 1, 1, 0}, /* NL */
147 {0, 0, 0, 1, 1, 1, 1, 0}, /* CR */
148 {0, 0, 0, 0, 1, 1, 1, 0}, /* CW */
149 {0, 0, 0, 1, 0, 1, 1, 0}, /* PR */
150 {0, 0, 0, 0, 0, 0, 1, 0}, /* PW */
151 {0, 0, 0, 0, 0, 0, 0, 0}, /* EX */
152 {0, 0, 0, 0, 0, 0, 0, 0} /* PD */
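/*
 * Example of reading the table above (illustrative only):
 *
 *	__quecvt_compat_matrix[DLM_LOCK_NL + 1][DLM_LOCK_EX + 1] == 1
 *	__quecvt_compat_matrix[DLM_LOCK_EX + 1][DLM_LOCK_NL + 1] == 0
 *
 * validate_lock_args() below rejects a DLM_LKF_QUECVT conversion whose
 * grmode/rqmode pair has a 0 entry here.
 */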
155 static void dlm_print_lkb(struct dlm_lkb *lkb)
157 printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x\n"
158 " status %d rqmode %d grmode %d wait_type %d ast_type %d\n",
159 lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
160 lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
161 lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_ast_type);
164 void dlm_print_rsb(struct dlm_rsb *r)
166 printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n",
167 r->res_nodeid, r->res_flags, r->res_first_lkid,
168 r->res_recover_locks_count, r->res_name);
171 /* Threads cannot use the lockspace while it's being recovered */
173 static inline void lock_recovery(struct dlm_ls *ls)
175 down_read(&ls->ls_in_recovery);
178 static inline void unlock_recovery(struct dlm_ls *ls)
180 up_read(&ls->ls_in_recovery);
183 static inline int lock_recovery_try(struct dlm_ls *ls)
185 return down_read_trylock(&ls->ls_in_recovery);
188 static inline int can_be_queued(struct dlm_lkb *lkb)
190 return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
193 static inline int force_blocking_asts(struct dlm_lkb *lkb)
195 return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
198 static inline int is_demoted(struct dlm_lkb *lkb)
200 return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
203 static inline int is_remote(struct dlm_rsb *r)
205 DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
206 return !!r->res_nodeid;
209 static inline int is_process_copy(struct dlm_lkb *lkb)
211 return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
214 static inline int is_master_copy(struct dlm_lkb *lkb)
216 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
217 DLM_ASSERT(lkb->lkb_nodeid, dlm_print_lkb(lkb););
218 return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
221 static inline int middle_conversion(struct dlm_lkb *lkb)
223 if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
224 (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
229 static inline int down_conversion(struct dlm_lkb *lkb)
231 return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
234 static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
236 if (is_master_copy(lkb))
239 DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
241 lkb->lkb_lksb->sb_status = rv;
242 lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags;
244 dlm_add_ast(lkb, AST_COMP);
247 static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
249 if (is_master_copy(lkb))
250 send_bast(r, lkb, rqmode);
252 lkb->lkb_bastmode = rqmode;
253 dlm_add_ast(lkb, AST_BAST);
258 * Basic operations on rsb's and lkb's
261 static struct dlm_rsb *create_rsb(struct dlm_ls *ls, char *name, int len)
265 r = allocate_rsb(ls, len);
271 memcpy(r->res_name, name, len);
272 mutex_init(&r->res_mutex);
274 INIT_LIST_HEAD(&r->res_lookup);
275 INIT_LIST_HEAD(&r->res_grantqueue);
276 INIT_LIST_HEAD(&r->res_convertqueue);
277 INIT_LIST_HEAD(&r->res_waitqueue);
278 INIT_LIST_HEAD(&r->res_root_list);
279 INIT_LIST_HEAD(&r->res_recover_list);
284 static int search_rsb_list(struct list_head *head, char *name, int len,
285 unsigned int flags, struct dlm_rsb **r_ret)
290 list_for_each_entry(r, head, res_hashchain) {
291 if (len == r->res_length && !memcmp(name, r->res_name, len))
297 if (r->res_nodeid && (flags & R_MASTER))
303 static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b,
304 unsigned int flags, struct dlm_rsb **r_ret)
309 error = search_rsb_list(&ls->ls_rsbtbl[b].list, name, len, flags, &r);
311 kref_get(&r->res_ref);
314 error = search_rsb_list(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
318 list_move(&r->res_hashchain, &ls->ls_rsbtbl[b].list);
320 if (dlm_no_directory(ls))
323 if (r->res_nodeid == -1) {
324 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
325 r->res_first_lkid = 0;
326 } else if (r->res_nodeid > 0) {
327 rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
328 r->res_first_lkid = 0;
330 DLM_ASSERT(r->res_nodeid == 0, dlm_print_rsb(r););
331 DLM_ASSERT(!rsb_flag(r, RSB_MASTER_UNCERTAIN),);
338 static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
339 unsigned int flags, struct dlm_rsb **r_ret)
342 write_lock(&ls->ls_rsbtbl[b].lock);
343 error = _search_rsb(ls, name, len, b, flags, r_ret);
344 write_unlock(&ls->ls_rsbtbl[b].lock);
349 * Find rsb in rsbtbl and potentially create/add one
351 * Delaying the release of rsb's has a similar benefit to applications keeping
352 * NL locks on an rsb, but without the guarantee that the cached master value
353 * will still be valid when the rsb is reused. Apps aren't always smart enough
354 * to keep NL locks on an rsb that they may lock again shortly; this can lead
355 * to excessive master lookups and removals if we don't delay the release.
357 * Searching for an rsb means looking through both the normal list and toss
358 * list. When found on the toss list the rsb is moved to the normal list with
359 * ref count of 1; when found on normal list the ref count is incremented.
362 static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
363 unsigned int flags, struct dlm_rsb **r_ret)
365 struct dlm_rsb *r, *tmp;
366 uint32_t hash, bucket;
369 if (dlm_no_directory(ls))
372 hash = jhash(name, namelen, 0);
373 bucket = hash & (ls->ls_rsbtbl_size - 1);
375 error = search_rsb(ls, name, namelen, bucket, flags, &r);
379 if (error == -ENOENT && !(flags & R_CREATE))
382 /* the rsb was found but wasn't a master copy */
383 if (error == -ENOTBLK)
387 r = create_rsb(ls, name, namelen);
392 r->res_bucket = bucket;
394 kref_init(&r->res_ref);
396 /* With no directory, the master can be set immediately */
397 if (dlm_no_directory(ls)) {
398 int nodeid = dlm_dir_nodeid(r);
399 if (nodeid == dlm_our_nodeid())
401 r->res_nodeid = nodeid;
404 write_lock(&ls->ls_rsbtbl[bucket].lock);
405 error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
407 write_unlock(&ls->ls_rsbtbl[bucket].lock);
412 list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
413 write_unlock(&ls->ls_rsbtbl[bucket].lock);
420 int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
421 unsigned int flags, struct dlm_rsb **r_ret)
423 return find_rsb(ls, name, namelen, flags, r_ret);
426 /* This is only called to add a reference when the code already holds
427 a valid reference to the rsb, so there's no need for locking. */
429 static inline void hold_rsb(struct dlm_rsb *r)
431 kref_get(&r->res_ref);
434 void dlm_hold_rsb(struct dlm_rsb *r)
439 static void toss_rsb(struct kref *kref)
441 struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
442 struct dlm_ls *ls = r->res_ls;
444 DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
445 kref_init(&r->res_ref);
446 list_move(&r->res_hashchain, &ls->ls_rsbtbl[r->res_bucket].toss);
447 r->res_toss_time = jiffies;
449 free_lvb(r->res_lvbptr);
450 r->res_lvbptr = NULL;
454 /* When all references to the rsb are gone it's transferred to
455 the tossed list for later disposal. */
457 static void put_rsb(struct dlm_rsb *r)
459 struct dlm_ls *ls = r->res_ls;
460 uint32_t bucket = r->res_bucket;
462 write_lock(&ls->ls_rsbtbl[bucket].lock);
463 kref_put(&r->res_ref, toss_rsb);
464 write_unlock(&ls->ls_rsbtbl[bucket].lock);
467 void dlm_put_rsb(struct dlm_rsb *r)
472 /* See comment for unhold_lkb */
474 static void unhold_rsb(struct dlm_rsb *r)
477 rv = kref_put(&r->res_ref, toss_rsb);
478 DLM_ASSERT(!rv, dlm_print_rsb(r););
481 static void kill_rsb(struct kref *kref)
483 struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
485 /* All work is done after the return from kref_put() so we
486 can release the write_lock before the remove and free. */
488 DLM_ASSERT(list_empty(&r->res_lookup),);
489 DLM_ASSERT(list_empty(&r->res_grantqueue),);
490 DLM_ASSERT(list_empty(&r->res_convertqueue),);
491 DLM_ASSERT(list_empty(&r->res_waitqueue),);
492 DLM_ASSERT(list_empty(&r->res_root_list),);
493 DLM_ASSERT(list_empty(&r->res_recover_list),);
496 /* Attaching/detaching lkb's from rsb's is for rsb reference counting.
497 The rsb must exist as long as any lkb's for it do. */
499 static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
502 lkb->lkb_resource = r;
505 static void detach_lkb(struct dlm_lkb *lkb)
507 if (lkb->lkb_resource) {
508 put_rsb(lkb->lkb_resource);
509 lkb->lkb_resource = NULL;
513 static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
515 struct dlm_lkb *lkb, *tmp;
519 lkb = allocate_lkb(ls);
523 lkb->lkb_nodeid = -1;
524 lkb->lkb_grmode = DLM_LOCK_IV;
525 kref_init(&lkb->lkb_ref);
527 get_random_bytes(&bucket, sizeof(bucket));
528 bucket &= (ls->ls_lkbtbl_size - 1);
530 write_lock(&ls->ls_lkbtbl[bucket].lock);
532 /* counter can roll over so we must verify lkid is not in use */
535 lkid = bucket | (ls->ls_lkbtbl[bucket].counter++ << 16);
537 list_for_each_entry(tmp, &ls->ls_lkbtbl[bucket].list,
539 if (tmp->lkb_id != lkid)
547 list_add(&lkb->lkb_idtbl_list, &ls->ls_lkbtbl[bucket].list);
548 write_unlock(&ls->ls_lkbtbl[bucket].lock);
554 static struct dlm_lkb *__find_lkb(struct dlm_ls *ls, uint32_t lkid)
556 uint16_t bucket = lkid & 0xFFFF;
559 list_for_each_entry(lkb, &ls->ls_lkbtbl[bucket].list, lkb_idtbl_list) {
560 if (lkb->lkb_id == lkid)
566 static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
569 uint16_t bucket = lkid & 0xFFFF;
571 if (bucket >= ls->ls_lkbtbl_size)
574 read_lock(&ls->ls_lkbtbl[bucket].lock);
575 lkb = __find_lkb(ls, lkid);
577 kref_get(&lkb->lkb_ref);
578 read_unlock(&ls->ls_lkbtbl[bucket].lock);
581 return lkb ? 0 : -ENOENT;
584 static void kill_lkb(struct kref *kref)
586 struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
588 /* All work is done after the return from kref_put() so we
589 can release the write_lock before the detach_lkb */
591 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
594 static int put_lkb(struct dlm_lkb *lkb)
596 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
597 uint16_t bucket = lkb->lkb_id & 0xFFFF;
599 write_lock(&ls->ls_lkbtbl[bucket].lock);
600 if (kref_put(&lkb->lkb_ref, kill_lkb)) {
601 list_del(&lkb->lkb_idtbl_list);
602 write_unlock(&ls->ls_lkbtbl[bucket].lock);
606 /* for local/process lkbs, lvbptr points to caller's lksb */
607 if (lkb->lkb_lvbptr && is_master_copy(lkb))
608 free_lvb(lkb->lkb_lvbptr);
612 write_unlock(&ls->ls_lkbtbl[bucket].lock);
617 int dlm_put_lkb(struct dlm_lkb *lkb)
622 /* This is only called to add a reference when the code already holds
623 a valid reference to the lkb, so there's no need for locking. */
625 static inline void hold_lkb(struct dlm_lkb *lkb)
627 kref_get(&lkb->lkb_ref);
630 /* This is called when we need to remove a reference and are certain
631 it's not the last ref. e.g. del_lkb is always called between a
632 find_lkb/put_lkb and is always the inverse of a previous add_lkb.
633 put_lkb would work fine, but would involve unnecessary locking */
635 static inline void unhold_lkb(struct dlm_lkb *lkb)
638 rv = kref_put(&lkb->lkb_ref, kill_lkb);
639 DLM_ASSERT(!rv, dlm_print_lkb(lkb););
642 static void lkb_add_ordered(struct list_head *new, struct list_head *head,
645 struct dlm_lkb *lkb = NULL;
647 list_for_each_entry(lkb, head, lkb_statequeue)
648 if (lkb->lkb_rqmode < mode)
652 list_add_tail(new, head);
654 __list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
657 /* add/remove lkb to rsb's grant/convert/wait queue */
659 static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
661 kref_get(&lkb->lkb_ref);
663 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
665 lkb->lkb_status = status;
668 case DLM_LKSTS_WAITING:
669 if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
670 list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
672 list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
674 case DLM_LKSTS_GRANTED:
675 /* convention says granted locks kept in order of grmode */
676 lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
679 case DLM_LKSTS_CONVERT:
680 if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
681 list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
683 list_add_tail(&lkb->lkb_statequeue,
684 &r->res_convertqueue);
687 DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
691 static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
694 list_del(&lkb->lkb_statequeue);
698 static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
702 add_lkb(r, lkb, sts);
706 /* add/remove lkb from global waiters list of lkb's waiting for
707 a reply from a remote node */
709 static void add_to_waiters(struct dlm_lkb *lkb, int mstype)
711 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
713 mutex_lock(&ls->ls_waiters_mutex);
714 if (lkb->lkb_wait_type) {
715 log_print("add_to_waiters error %d", lkb->lkb_wait_type);
718 lkb->lkb_wait_type = mstype;
719 kref_get(&lkb->lkb_ref);
720 list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
722 mutex_unlock(&ls->ls_waiters_mutex);
725 static int _remove_from_waiters(struct dlm_lkb *lkb)
729 if (!lkb->lkb_wait_type) {
730 log_print("remove_from_waiters error");
734 lkb->lkb_wait_type = 0;
735 list_del(&lkb->lkb_wait_reply);
741 static int remove_from_waiters(struct dlm_lkb *lkb)
743 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
746 mutex_lock(&ls->ls_waiters_mutex);
747 error = _remove_from_waiters(lkb);
748 mutex_unlock(&ls->ls_waiters_mutex);
752 static void dir_remove(struct dlm_rsb *r)
756 if (dlm_no_directory(r->res_ls))
759 to_nodeid = dlm_dir_nodeid(r);
760 if (to_nodeid != dlm_our_nodeid())
763 dlm_dir_remove_entry(r->res_ls, to_nodeid,
764 r->res_name, r->res_length);
767 /* FIXME: shouldn't this be able to exit as soon as one non-due rsb is
768 found since they are in order of newest to oldest? */
770 static int shrink_bucket(struct dlm_ls *ls, int b)
773 int count = 0, found;
777 write_lock(&ls->ls_rsbtbl[b].lock);
778 list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
780 if (!time_after_eq(jiffies, r->res_toss_time +
781 dlm_config.toss_secs * HZ))
788 write_unlock(&ls->ls_rsbtbl[b].lock);
792 if (kref_put(&r->res_ref, kill_rsb)) {
793 list_del(&r->res_hashchain);
794 write_unlock(&ls->ls_rsbtbl[b].lock);
801 write_unlock(&ls->ls_rsbtbl[b].lock);
802 log_error(ls, "tossed rsb in use %s", r->res_name);
809 void dlm_scan_rsbs(struct dlm_ls *ls)
813 if (dlm_locking_stopped(ls))
816 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
817 shrink_bucket(ls, i);
822 /* lkb is master or local copy */
824 static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
826 int b, len = r->res_ls->ls_lvblen;
828 /* b=1 lvb returned to caller
829 b=0 lvb written to rsb or invalidated
832 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
835 if (!lkb->lkb_lvbptr)
838 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
844 memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
845 lkb->lkb_lvbseq = r->res_lvbseq;
848 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
849 rsb_set_flag(r, RSB_VALNOTVALID);
853 if (!lkb->lkb_lvbptr)
856 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
860 r->res_lvbptr = allocate_lvb(r->res_ls);
865 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
867 lkb->lkb_lvbseq = r->res_lvbseq;
868 rsb_clear_flag(r, RSB_VALNOTVALID);
871 if (rsb_flag(r, RSB_VALNOTVALID))
872 lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
875 static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
877 if (lkb->lkb_grmode < DLM_LOCK_PW)
880 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
881 rsb_set_flag(r, RSB_VALNOTVALID);
885 if (!lkb->lkb_lvbptr)
888 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
892 r->res_lvbptr = allocate_lvb(r->res_ls);
897 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
899 rsb_clear_flag(r, RSB_VALNOTVALID);
902 /* lkb is process copy (pc) */
904 static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
905 struct dlm_message *ms)
909 if (!lkb->lkb_lvbptr)
912 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
915 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
917 int len = receive_extralen(ms);
918 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
919 lkb->lkb_lvbseq = ms->m_lvbseq;
923 /* Manipulate lkb's on rsb's convert/granted/waiting queues
924 remove_lock -- used for unlock, removes lkb from granted
925 revert_lock -- used for cancel, moves lkb from convert to granted
926 grant_lock -- used for request and convert, adds lkb to granted or
927 moves lkb from convert or waiting to granted
929 Each of these is used for master or local copy lkb's. There is
930 also a _pc() variation used to make the corresponding change on
931 a process copy (pc) lkb. */
933 static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
936 lkb->lkb_grmode = DLM_LOCK_IV;
937 /* this unhold undoes the original ref from create_lkb()
938 so this leads to the lkb being freed */
942 static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
944 set_lvb_unlock(r, lkb);
945 _remove_lock(r, lkb);
948 static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
950 _remove_lock(r, lkb);
953 static void revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
955 lkb->lkb_rqmode = DLM_LOCK_IV;
957 switch (lkb->lkb_status) {
958 case DLM_LKSTS_CONVERT:
959 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
961 case DLM_LKSTS_WAITING:
963 lkb->lkb_grmode = DLM_LOCK_IV;
964 /* this unhold undoes the original ref from create_lkb()
965 so this leads to the lkb being freed */
969 log_print("invalid status for revert %d", lkb->lkb_status);
973 static void revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
978 static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
980 if (lkb->lkb_grmode != lkb->lkb_rqmode) {
981 lkb->lkb_grmode = lkb->lkb_rqmode;
983 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
985 add_lkb(r, lkb, DLM_LKSTS_GRANTED);
988 lkb->lkb_rqmode = DLM_LOCK_IV;
991 static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
993 set_lvb_lock(r, lkb);
995 lkb->lkb_highbast = 0;
998 static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
999 struct dlm_message *ms)
1001 set_lvb_lock_pc(r, lkb, ms);
1002 _grant_lock(r, lkb);
1005 /* called by grant_pending_locks() which means an async grant message must
1006 be sent to the requesting node in addition to granting the lock if the
1007 lkb belongs to a remote node. */
1009 static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
1012 if (is_master_copy(lkb))
1015 queue_cast(r, lkb, 0);
1018 static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
1020 struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
1022 if (lkb->lkb_id == first->lkb_id)
1028 /* Check if the given lkb conflicts with another lkb on the queue. */
1030 static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
1032 struct dlm_lkb *this;
1034 list_for_each_entry(this, head, lkb_statequeue) {
1037 if (!modes_compat(this, lkb))
1044 * "A conversion deadlock arises with a pair of lock requests in the converting
1045 * queue for one resource. The granted mode of each lock blocks the requested
1046 * mode of the other lock."
1048 * Part 2: if the granted mode of lkb is preventing the first lkb in the
1049 * convert queue from being granted, then demote lkb (set grmode to NL).
1050 * This second form requires that we check for conv-deadlk even when
1051 * now == 0 in _can_be_granted().
1054 * Granted Queue: empty
1055 * Convert Queue: NL->EX (first lock)
1056 * PR->EX (second lock)
1058 * The first lock can't be granted because of the granted mode of the second
1059 * lock and the second lock can't be granted because it's not first in the
1060 * list. We demote the granted mode of the second lock (the lkb passed to this function).
1063 * After the resolution, the "grant pending" function needs to go back and try
1064 * to grant locks on the convert queue again since the first lock can now be granted.
1068 static int conversion_deadlock_detect(struct dlm_rsb *rsb, struct dlm_lkb *lkb)
1070 struct dlm_lkb *this, *first = NULL, *self = NULL;
1072 list_for_each_entry(this, &rsb->res_convertqueue, lkb_statequeue) {
1080 if (!modes_compat(this, lkb) && !modes_compat(lkb, this))
1084 /* if lkb is on the convert queue and is preventing the first
1085 from being granted, then there's deadlock and we demote lkb.
1086 multiple converting locks may need to do this before the first
1087 converting lock can be granted. */
1089 if (self && self != first) {
1090 if (!modes_compat(lkb, first) &&
1091 !queue_conflict(&rsb->res_grantqueue, first))
1099 * Return 1 if the lock can be granted, 0 otherwise.
1100 * Also detect and resolve conversion deadlocks.
1102 * lkb is the lock to be granted
1104 * now is 1 if the function is being called in the context of the
1105 * immediate request, it is 0 if called later, after the lock has been queued.
1108 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
1111 static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
1113 int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
1116 * 6-10: Version 5.4 introduced an option to address the phenomenon of
1117 * a new request for a NL mode lock being blocked.
1119 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
1120 * request, then it would be granted. In essence, the use of this flag
1121 * tells the Lock Manager to expedite this request by not considering
1122 * what may be in the CONVERTING or WAITING queues... As of this
1123 * writing, the EXPEDITE flag can be used only with new requests for NL
1124 * mode locks. This flag is not valid for conversion requests.
1126 * A shortcut. Earlier checks return an error if EXPEDITE is used in a
1127 * conversion or used with a non-NL requested mode. We also know an
1128 * EXPEDITE request is always granted immediately, so now must always
1129 * be 1. The full condition to grant an expedite request: (now &&
1130 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
1131 * therefore be shortened to just checking the flag.
1134 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
1138 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
1139 * added to the remaining conditions.
1142 if (queue_conflict(&r->res_grantqueue, lkb))
1146 * 6-3: By default, a conversion request is immediately granted if the
1147 * requested mode is compatible with the modes of all other granted locks.
1151 if (queue_conflict(&r->res_convertqueue, lkb))
1155 * 6-5: But the default algorithm for deciding whether to grant or
1156 * queue conversion requests does not by itself guarantee that such
1157 * requests are serviced on a "first come first serve" basis. This, in
1158 * turn, can lead to a phenomenon known as "indefinite postponement".
1160 * 6-7: This issue is dealt with by using the optional QUECVT flag with
1161 * the system service employed to request a lock conversion. This flag
1162 * forces certain conversion requests to be queued, even if they are
1163 * compatible with the granted modes of other locks on the same
1164 * resource. Thus, the use of this flag results in conversion requests
1165 * being ordered on a "first come first serve" basis.
1167 * DCT: This condition is all about new conversions being able to occur
1168 * "in place" while the lock remains on the granted queue (assuming
1169 * nothing else conflicts.) IOW if QUECVT isn't set, a conversion
1170 * doesn't _have_ to go onto the convert queue where it's processed in
1171 * order. The "now" variable is necessary to distinguish converts
1172 * being received and processed for the first time now, because once a
1173 * convert is moved to the conversion queue the condition below applies
1174 * requiring fifo granting.
1177 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
1181 * The NOORDER flag is set to avoid the standard vms rules on grant
1185 if (lkb->lkb_exflags & DLM_LKF_NOORDER)
1189 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
1190 * granted until all other conversion requests ahead of it are granted and/or canceled.
1194 if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
1198 * 6-4: By default, a new request is immediately granted only if all
1199 * three of the following conditions are satisfied when the request is issued:
1201 * - The queue of ungranted conversion requests for the resource is empty.
1203 * - The queue of ungranted new requests for the resource is empty.
1204 * - The mode of the new request is compatible with the most
1205 * restrictive mode of all granted locks on the resource.
1208 if (now && !conv && list_empty(&r->res_convertqueue) &&
1209 list_empty(&r->res_waitqueue))
1213 * 6-4: Once a lock request is in the queue of ungranted new requests,
1214 * it cannot be granted until the queue of ungranted conversion
1215 * requests is empty, all ungranted new requests ahead of it are
1216 * granted and/or canceled, and it is compatible with the granted mode
1217 * of the most restrictive lock granted on the resource.
1220 if (!now && !conv && list_empty(&r->res_convertqueue) &&
1221 first_in_list(lkb, &r->res_waitqueue))
1226 * The following, enabled by CONVDEADLK, departs from VMS.
1229 if (conv && (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) &&
1230 conversion_deadlock_detect(r, lkb)) {
1231 lkb->lkb_grmode = DLM_LOCK_NL;
1232 lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
1239 * The ALTPR and ALTCW flags aren't traditional lock manager flags, but are a
1240 * simple way to provide a big optimization to applications that can use them.
1243 static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
1245 uint32_t flags = lkb->lkb_exflags;
1247 int8_t alt = 0, rqmode = lkb->lkb_rqmode;
1249 rv = _can_be_granted(r, lkb, now);
1253 if (lkb->lkb_sbflags & DLM_SBF_DEMOTED)
1256 if (rqmode != DLM_LOCK_PR && flags & DLM_LKF_ALTPR)
1258 else if (rqmode != DLM_LOCK_CW && flags & DLM_LKF_ALTCW)
1262 lkb->lkb_rqmode = alt;
1263 rv = _can_be_granted(r, lkb, now);
1265 lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
1267 lkb->lkb_rqmode = rqmode;
1273 static int grant_pending_convert(struct dlm_rsb *r, int high)
1275 struct dlm_lkb *lkb, *s;
1276 int hi, demoted, quit, grant_restart, demote_restart;
1284 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
1285 demoted = is_demoted(lkb);
1286 if (can_be_granted(r, lkb, 0)) {
1287 grant_lock_pending(r, lkb);
1290 hi = max_t(int, lkb->lkb_rqmode, hi);
1291 if (!demoted && is_demoted(lkb))
1298 if (demote_restart && !quit) {
1303 return max_t(int, high, hi);
1306 static int grant_pending_wait(struct dlm_rsb *r, int high)
1308 struct dlm_lkb *lkb, *s;
1310 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
1311 if (can_be_granted(r, lkb, 0))
1312 grant_lock_pending(r, lkb);
1314 high = max_t(int, lkb->lkb_rqmode, high);
1320 static void grant_pending_locks(struct dlm_rsb *r)
1322 struct dlm_lkb *lkb, *s;
1323 int high = DLM_LOCK_IV;
1325 DLM_ASSERT(is_master(r), dlm_print_rsb(r););
1327 high = grant_pending_convert(r, high);
1328 high = grant_pending_wait(r, high);
1330 if (high == DLM_LOCK_IV)
1334 * If there are locks left on the wait/convert queue then send blocking
1335 * ASTs to granted locks based on the largest requested mode (high)
1336 * found above. FIXME: highbast < high comparison not valid for PR/CW.
1339 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
1340 if (lkb->lkb_bastaddr && (lkb->lkb_highbast < high) &&
1341 !__dlm_compat_matrix[lkb->lkb_grmode+1][high+1]) {
1342 queue_bast(r, lkb, high);
1343 lkb->lkb_highbast = high;
1348 static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
1349 struct dlm_lkb *lkb)
1353 list_for_each_entry(gr, head, lkb_statequeue) {
1354 if (gr->lkb_bastaddr &&
1355 gr->lkb_highbast < lkb->lkb_rqmode &&
1356 !modes_compat(gr, lkb)) {
1357 queue_bast(r, gr, lkb->lkb_rqmode);
1358 gr->lkb_highbast = lkb->lkb_rqmode;
1363 static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
1365 send_bast_queue(r, &r->res_grantqueue, lkb);
1368 static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
1370 send_bast_queue(r, &r->res_grantqueue, lkb);
1371 send_bast_queue(r, &r->res_convertqueue, lkb);
1374 /* set_master(r, lkb) -- set the master nodeid of a resource
1376 The purpose of this function is to set the nodeid field in the given
1377 lkb using the nodeid field in the given rsb. If the rsb's nodeid is
1378 known, it can just be copied to the lkb and the function will return
1379 0. If the rsb's nodeid is _not_ known, it needs to be looked up
1380 before it can be copied to the lkb.
1382 When the rsb nodeid is being looked up remotely, the initial lkb
1383 causing the lookup is kept on the ls_waiters list waiting for the
1384 lookup reply. Other lkb's waiting for the same rsb lookup are kept
1385 on the rsb's res_lookup list until the master is verified.
1388 0: nodeid is set in rsb/lkb and the caller should go ahead and use it
1389 1: the rsb master is not available and the lkb has been placed on a wait queue.
1393 static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
1395 struct dlm_ls *ls = r->res_ls;
1396 int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();
1398 if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
1399 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
1400 r->res_first_lkid = lkb->lkb_id;
1401 lkb->lkb_nodeid = r->res_nodeid;
1405 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
1406 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
1410 if (r->res_nodeid == 0) {
1411 lkb->lkb_nodeid = 0;
1415 if (r->res_nodeid > 0) {
1416 lkb->lkb_nodeid = r->res_nodeid;
1420 DLM_ASSERT(r->res_nodeid == -1, dlm_print_rsb(r););
1422 dir_nodeid = dlm_dir_nodeid(r);
1424 if (dir_nodeid != our_nodeid) {
1425 r->res_first_lkid = lkb->lkb_id;
1426 send_lookup(r, lkb);
1431 /* It's possible for dlm_scand to remove an old rsb for
1432 this same resource from the toss list, us to create
1433 a new one, look up the master locally, and find it
1434 already exists just before dlm_scand does the
1435 dir_remove() on the previous rsb. */
1437 error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
1438 r->res_length, &ret_nodeid);
1441 log_debug(ls, "dir_lookup error %d %s", error, r->res_name);
1445 if (ret_nodeid == our_nodeid) {
1446 r->res_first_lkid = 0;
1448 lkb->lkb_nodeid = 0;
1450 r->res_first_lkid = lkb->lkb_id;
1451 r->res_nodeid = ret_nodeid;
1452 lkb->lkb_nodeid = ret_nodeid;
1457 static void process_lookup_list(struct dlm_rsb *r)
1459 struct dlm_lkb *lkb, *safe;
1461 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
1462 list_del(&lkb->lkb_rsb_lookup);
1463 _request_lock(r, lkb);
1468 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
1470 static void confirm_master(struct dlm_rsb *r, int error)
1472 struct dlm_lkb *lkb;
1474 if (!r->res_first_lkid)
1480 r->res_first_lkid = 0;
1481 process_lookup_list(r);
1485 /* the remote master didn't queue our NOQUEUE request;
1486 make a waiting lkb the first_lkid */
1488 r->res_first_lkid = 0;
1490 if (!list_empty(&r->res_lookup)) {
1491 lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
1493 list_del(&lkb->lkb_rsb_lookup);
1494 r->res_first_lkid = lkb->lkb_id;
1495 _request_lock(r, lkb);
1501 log_error(r->res_ls, "confirm_master unknown error %d", error);
1505 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
1506 int namelen, uint32_t parent_lkid, void *ast,
1507 void *astarg, void *bast, struct dlm_args *args)
1511 /* check for invalid arg usage */
1513 if (mode < 0 || mode > DLM_LOCK_EX)
1516 if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
1519 if (flags & DLM_LKF_CANCEL)
1522 if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
1525 if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
1528 if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
1531 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
1534 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
1537 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
1540 if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
1546 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
1549 /* parent/child locks not yet supported */
1553 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
1556 /* these args will be copied to the lkb in validate_lock_args,
1557 it cannot be done now because when converting locks, fields in
1558 an active lkb cannot be modified before locking the rsb */
1560 args->flags = flags;
1561 args->astaddr = ast;
1562 args->astparam = (long) astarg;
1563 args->bastaddr = bast;
1571 static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
1573 if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
1574 DLM_LKF_FORCEUNLOCK))
1577 args->flags = flags;
1578 args->astparam = (long) astarg;
1582 static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
1583 struct dlm_args *args)
1587 if (args->flags & DLM_LKF_CONVERT) {
1588 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
1591 if (args->flags & DLM_LKF_QUECVT &&
1592 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
1596 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
1599 if (lkb->lkb_wait_type)
1603 lkb->lkb_exflags = args->flags;
1604 lkb->lkb_sbflags = 0;
1605 lkb->lkb_astaddr = args->astaddr;
1606 lkb->lkb_astparam = args->astparam;
1607 lkb->lkb_bastaddr = args->bastaddr;
1608 lkb->lkb_rqmode = args->mode;
1609 lkb->lkb_lksb = args->lksb;
1610 lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
1611 lkb->lkb_ownpid = (int) current->pid;
1617 static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
1621 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
1624 if (args->flags & DLM_LKF_FORCEUNLOCK)
1627 if (args->flags & DLM_LKF_CANCEL &&
1628 lkb->lkb_status == DLM_LKSTS_GRANTED)
1631 if (!(args->flags & DLM_LKF_CANCEL) &&
1632 lkb->lkb_status != DLM_LKSTS_GRANTED)
1636 if (lkb->lkb_wait_type)
1640 lkb->lkb_exflags = args->flags;
1641 lkb->lkb_sbflags = 0;
1642 lkb->lkb_astparam = args->astparam;
1650 * Four stage 4 varieties:
1651 * do_request(), do_convert(), do_unlock(), do_cancel()
1652 * These are called on the master node for the given lock and
1653 * from the central locking logic.
1656 static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
1660 if (can_be_granted(r, lkb, 1)) {
1662 queue_cast(r, lkb, 0);
1666 if (can_be_queued(lkb)) {
1667 error = -EINPROGRESS;
1668 add_lkb(r, lkb, DLM_LKSTS_WAITING);
1669 send_blocking_asts(r, lkb);
1674 if (force_blocking_asts(lkb))
1675 send_blocking_asts_all(r, lkb);
1676 queue_cast(r, lkb, -EAGAIN);
1682 static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
1686 /* changing an existing lock may allow others to be granted */
1688 if (can_be_granted(r, lkb, 1)) {
1690 queue_cast(r, lkb, 0);
1691 grant_pending_locks(r);
1695 if (can_be_queued(lkb)) {
1696 if (is_demoted(lkb))
1697 grant_pending_locks(r);
1698 error = -EINPROGRESS;
1700 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
1701 send_blocking_asts(r, lkb);
1706 if (force_blocking_asts(lkb))
1707 send_blocking_asts_all(r, lkb);
1708 queue_cast(r, lkb, -EAGAIN);
1714 static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1716 remove_lock(r, lkb);
1717 queue_cast(r, lkb, -DLM_EUNLOCK);
1718 grant_pending_locks(r);
1719 return -DLM_EUNLOCK;
1722 static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
1724 revert_lock(r, lkb);
1725 queue_cast(r, lkb, -DLM_ECANCEL);
1726 grant_pending_locks(r);
1727 return -DLM_ECANCEL;
1731 * Four stage 3 varieties:
1732 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
1735 /* add a new lkb to a possibly new rsb, called by requesting process */
1737 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1741 /* set_master: sets lkb nodeid from r */
1743 error = set_master(r, lkb);
1752 /* receive_request() calls do_request() on remote node */
1753 error = send_request(r, lkb);
1755 error = do_request(r, lkb);
1760 /* change some property of an existing lkb, e.g. mode */
1762 static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1767 /* receive_convert() calls do_convert() on remote node */
1768 error = send_convert(r, lkb);
1770 error = do_convert(r, lkb);
1775 /* remove an existing lkb from the granted queue */
1777 static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1782 /* receive_unlock() calls do_unlock() on remote node */
1783 error = send_unlock(r, lkb);
1785 error = do_unlock(r, lkb);
1790 /* remove an existing lkb from the convert or wait queue */
1792 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1797 /* receive_cancel() calls do_cancel() on remote node */
1798 error = send_cancel(r, lkb);
1800 error = do_cancel(r, lkb);
1806 * Four stage 2 varieties:
1807 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
1810 static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
1811 int len, struct dlm_args *args)
1816 error = validate_lock_args(ls, lkb, args);
1820 error = find_rsb(ls, name, len, R_CREATE, &r);
1827 lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
1829 error = _request_lock(r, lkb);
1838 static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
1839 struct dlm_args *args)
1844 r = lkb->lkb_resource;
1849 error = validate_lock_args(ls, lkb, args);
1853 error = _convert_lock(r, lkb);
1860 static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
1861 struct dlm_args *args)
1866 r = lkb->lkb_resource;
1871 error = validate_unlock_args(lkb, args);
1875 error = _unlock_lock(r, lkb);
1882 static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
1883 struct dlm_args *args)
1888 r = lkb->lkb_resource;
1893 error = validate_unlock_args(lkb, args);
1897 error = _cancel_lock(r, lkb);
1905 * Two stage 1 varieties: dlm_lock() and dlm_unlock()
1908 int dlm_lock(dlm_lockspace_t *lockspace,
1910 struct dlm_lksb *lksb,
1913 unsigned int namelen,
1914 uint32_t parent_lkid,
1915 void (*ast) (void *astarg),
1917 void (*bast) (void *astarg, int mode))
1920 struct dlm_lkb *lkb;
1921 struct dlm_args args;
1922 int error, convert = flags & DLM_LKF_CONVERT;
1924 ls = dlm_find_lockspace_local(lockspace);
1931 error = find_lkb(ls, lksb->sb_lkid, &lkb);
1933 error = create_lkb(ls, &lkb);
1938 error = set_lock_args(mode, lksb, flags, namelen, parent_lkid, ast,
1939 astarg, bast, &args);
1944 error = convert_lock(ls, lkb, &args);
1946 error = request_lock(ls, lkb, name, namelen, &args);
1948 if (error == -EINPROGRESS)
1951 if (convert || error)
1953 if (error == -EAGAIN)
1956 unlock_recovery(ls);
1957 dlm_put_lockspace(ls);
1961 int dlm_unlock(dlm_lockspace_t *lockspace,
1964 struct dlm_lksb *lksb,
1968 struct dlm_lkb *lkb;
1969 struct dlm_args args;
1972 ls = dlm_find_lockspace_local(lockspace);
1978 error = find_lkb(ls, lkid, &lkb);
1982 error = set_unlock_args(flags, astarg, &args);
1986 if (flags & DLM_LKF_CANCEL)
1987 error = cancel_lock(ls, lkb, &args);
1989 error = unlock_lock(ls, lkb, &args);
1991 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
1996 unlock_recovery(ls);
1997 dlm_put_lockspace(ls);
2002 * send/receive routines for remote operations and replies
2006 * send_request receive_request
2007 * send_convert receive_convert
2008 * send_unlock receive_unlock
2009 * send_cancel receive_cancel
2010 * send_grant receive_grant
2011 * send_bast receive_bast
2012 * send_lookup receive_lookup
2013 * send_remove receive_remove
2016 * receive_request_reply send_request_reply
2017 * receive_convert_reply send_convert_reply
2018 * receive_unlock_reply send_unlock_reply
2019 * receive_cancel_reply send_cancel_reply
2020 * receive_lookup_reply send_lookup_reply
2023 static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
2024 int to_nodeid, int mstype,
2025 struct dlm_message **ms_ret,
2026 struct dlm_mhandle **mh_ret)
2028 struct dlm_message *ms;
2029 struct dlm_mhandle *mh;
2031 int mb_len = sizeof(struct dlm_message);
2034 case DLM_MSG_REQUEST:
2035 case DLM_MSG_LOOKUP:
2036 case DLM_MSG_REMOVE:
2037 mb_len += r->res_length;
2039 case DLM_MSG_CONVERT:
2040 case DLM_MSG_UNLOCK:
2041 case DLM_MSG_REQUEST_REPLY:
2042 case DLM_MSG_CONVERT_REPLY:
2044 if (lkb && lkb->lkb_lvbptr)
2045 mb_len += r->res_ls->ls_lvblen;
2049 /* get_buffer gives us a message handle (mh) that we need to
2050 pass into lowcomms_commit and a message buffer (mb) that we
2051 write our data into */
2053 mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_KERNEL, &mb);
2057 memset(mb, 0, mb_len);
2059 ms = (struct dlm_message *) mb;
2061 ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
2062 ms->m_header.h_lockspace = r->res_ls->ls_global_id;
2063 ms->m_header.h_nodeid = dlm_our_nodeid();
2064 ms->m_header.h_length = mb_len;
2065 ms->m_header.h_cmd = DLM_MSG;
2067 ms->m_type = mstype;
2074 /* further lowcomms enhancements or alternate implementations may make
2075 the return value from this function useful at some point */
2077 static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
2079 dlm_message_out(ms);
2080 dlm_lowcomms_commit_buffer(mh);
2084 static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
2085 struct dlm_message *ms)
2087 ms->m_nodeid = lkb->lkb_nodeid;
2088 ms->m_pid = lkb->lkb_ownpid;
2089 ms->m_lkid = lkb->lkb_id;
2090 ms->m_remid = lkb->lkb_remid;
2091 ms->m_exflags = lkb->lkb_exflags;
2092 ms->m_sbflags = lkb->lkb_sbflags;
2093 ms->m_flags = lkb->lkb_flags;
2094 ms->m_lvbseq = lkb->lkb_lvbseq;
2095 ms->m_status = lkb->lkb_status;
2096 ms->m_grmode = lkb->lkb_grmode;
2097 ms->m_rqmode = lkb->lkb_rqmode;
2098 ms->m_hash = r->res_hash;
2100 /* m_result and m_bastmode are set from function args,
2101 not from lkb fields */
2103 if (lkb->lkb_bastaddr)
2104 ms->m_asts |= AST_BAST;
2105 if (lkb->lkb_astaddr)
2106 ms->m_asts |= AST_COMP;
2108 if (ms->m_type == DLM_MSG_REQUEST || ms->m_type == DLM_MSG_LOOKUP)
2109 memcpy(ms->m_extra, r->res_name, r->res_length);
2111 else if (lkb->lkb_lvbptr)
2112 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
2116 static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
2118 struct dlm_message *ms;
2119 struct dlm_mhandle *mh;
2120 int to_nodeid, error;
2122 add_to_waiters(lkb, mstype);
2124 to_nodeid = r->res_nodeid;
2126 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
2130 send_args(r, lkb, ms);
2132 error = send_message(mh, ms);
2138 remove_from_waiters(lkb);
2142 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
2144 return send_common(r, lkb, DLM_MSG_REQUEST);
2147 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
2151 error = send_common(r, lkb, DLM_MSG_CONVERT);
2153 /* down conversions go without a reply from the master */
2154 if (!error && down_conversion(lkb)) {
2155 remove_from_waiters(lkb);
2156 r->res_ls->ls_stub_ms.m_result = 0;
2157 __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
2163 /* FIXME: if this lkb is the only lock we hold on the rsb, then set
2164 MASTER_UNCERTAIN to force the next request on the rsb to confirm
2165 that the master is still correct. */
2167 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2169 return send_common(r, lkb, DLM_MSG_UNLOCK);
2172 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
2174 return send_common(r, lkb, DLM_MSG_CANCEL);
2177 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
2179 struct dlm_message *ms;
2180 struct dlm_mhandle *mh;
2181 int to_nodeid, error;
2183 to_nodeid = lkb->lkb_nodeid;
2185 error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
2189 send_args(r, lkb, ms);
2193 error = send_message(mh, ms);
2198 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
2200 struct dlm_message *ms;
2201 struct dlm_mhandle *mh;
2202 int to_nodeid, error;
2204 to_nodeid = lkb->lkb_nodeid;
2206 error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
2210 send_args(r, lkb, ms);
2212 ms->m_bastmode = mode;
2214 error = send_message(mh, ms);
2219 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
2221 struct dlm_message *ms;
2222 struct dlm_mhandle *mh;
2223 int to_nodeid, error;
2225 add_to_waiters(lkb, DLM_MSG_LOOKUP);
2227 to_nodeid = dlm_dir_nodeid(r);
2229 error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
2233 send_args(r, lkb, ms);
2235 error = send_message(mh, ms);
2241 remove_from_waiters(lkb);
2245 static int send_remove(struct dlm_rsb *r)
2247 struct dlm_message *ms;
2248 struct dlm_mhandle *mh;
2249 int to_nodeid, error;
2251 to_nodeid = dlm_dir_nodeid(r);
2253 error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
2257 memcpy(ms->m_extra, r->res_name, r->res_length);
2258 ms->m_hash = r->res_hash;
2260 error = send_message(mh, ms);
2265 static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
2268 struct dlm_message *ms;
2269 struct dlm_mhandle *mh;
2270 int to_nodeid, error;
2272 to_nodeid = lkb->lkb_nodeid;
2274 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
2278 send_args(r, lkb, ms);
2282 error = send_message(mh, ms);
2287 static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2289 return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
2292 static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2294 return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
2297 static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2299 return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
2302 static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2304 return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
2307 static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
2308 int ret_nodeid, int rv)
2310 struct dlm_rsb *r = &ls->ls_stub_rsb;
2311 struct dlm_message *ms;
2312 struct dlm_mhandle *mh;
2313 int error, nodeid = ms_in->m_header.h_nodeid;
2315 error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
2319 ms->m_lkid = ms_in->m_lkid;
2321 ms->m_nodeid = ret_nodeid;
2323 error = send_message(mh, ms);
2328 /* which args we save from a received message depends heavily on the type
2329 of message, unlike the send side where we can safely send everything about
2330 the lkb for any type of message */
2332 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
2334 lkb->lkb_exflags = ms->m_exflags;
2335 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
2336 (ms->m_flags & 0x0000FFFF);
2339 static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
2341 lkb->lkb_sbflags = ms->m_sbflags;
2342 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
2343 (ms->m_flags & 0x0000FFFF);
2346 static int receive_extralen(struct dlm_message *ms)
2348 return (ms->m_header.h_length - sizeof(struct dlm_message));
2351 static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
2352 struct dlm_message *ms)
2356 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
2357 if (!lkb->lkb_lvbptr)
2358 lkb->lkb_lvbptr = allocate_lvb(ls);
2359 if (!lkb->lkb_lvbptr)
2361 len = receive_extralen(ms);
2362 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
2367 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2368 struct dlm_message *ms)
2370 lkb->lkb_nodeid = ms->m_header.h_nodeid;
2371 lkb->lkb_ownpid = ms->m_pid;
2372 lkb->lkb_remid = ms->m_lkid;
2373 lkb->lkb_grmode = DLM_LOCK_IV;
2374 lkb->lkb_rqmode = ms->m_rqmode;
2375 lkb->lkb_bastaddr = (void *) (long) (ms->m_asts & AST_BAST);
2376 lkb->lkb_astaddr = (void *) (long) (ms->m_asts & AST_COMP);
2378 DLM_ASSERT(is_master_copy(lkb), dlm_print_lkb(lkb););
2380 if (receive_lvb(ls, lkb, ms))
2386 static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2387 struct dlm_message *ms)
2389 if (lkb->lkb_nodeid != ms->m_header.h_nodeid) {
2390 log_error(ls, "convert_args nodeid %d %d lkid %x %x",
2391 lkb->lkb_nodeid, ms->m_header.h_nodeid,
2392 lkb->lkb_id, lkb->lkb_remid);
2396 if (!is_master_copy(lkb))
2399 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2402 if (receive_lvb(ls, lkb, ms))
2405 lkb->lkb_rqmode = ms->m_rqmode;
2406 lkb->lkb_lvbseq = ms->m_lvbseq;
2411 static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2412 struct dlm_message *ms)
2414 if (!is_master_copy(lkb))
2416 if (receive_lvb(ls, lkb, ms))
2421 /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
2422 uses to send a reply and that the remote end uses to process the reply. */
2424 static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
2426 struct dlm_lkb *lkb = &ls->ls_stub_lkb;
2427 lkb->lkb_nodeid = ms->m_header.h_nodeid;
2428 lkb->lkb_remid = ms->m_lkid;
2431 static void receive_request(struct dlm_ls *ls, struct dlm_message *ms)
2433 struct dlm_lkb *lkb;
2437 error = create_lkb(ls, &lkb);
2441 receive_flags(lkb, ms);
2442 lkb->lkb_flags |= DLM_IFL_MSTCPY;
2443 error = receive_request_args(ls, lkb, ms);
2449 namelen = receive_extralen(ms);
2451 error = find_rsb(ls, ms->m_extra, namelen, R_MASTER, &r);
2460 error = do_request(r, lkb);
2461 send_request_reply(r, lkb, error);
2466 if (error == -EINPROGRESS)
2473 setup_stub_lkb(ls, ms);
2474 send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
2477 static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
2479 struct dlm_lkb *lkb;
2481 int error, reply = 1;
2483 error = find_lkb(ls, ms->m_remid, &lkb);
2487 r = lkb->lkb_resource;
2492 receive_flags(lkb, ms);
2493 error = receive_convert_args(ls, lkb, ms);
2496 reply = !down_conversion(lkb);
2498 error = do_convert(r, lkb);
2501 send_convert_reply(r, lkb, error);
2509 setup_stub_lkb(ls, ms);
2510 send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
2513 static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
2515 struct dlm_lkb *lkb;
2519 error = find_lkb(ls, ms->m_remid, &lkb);
2523 r = lkb->lkb_resource;
2528 receive_flags(lkb, ms);
2529 error = receive_unlock_args(ls, lkb, ms);
2533 error = do_unlock(r, lkb);
2535 send_unlock_reply(r, lkb, error);
2543 setup_stub_lkb(ls, ms);
2544 send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
2547 static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
2549 struct dlm_lkb *lkb;
2553 error = find_lkb(ls, ms->m_remid, &lkb);
2557 receive_flags(lkb, ms);
2559 r = lkb->lkb_resource;
2564 error = do_cancel(r, lkb);
2565 send_cancel_reply(r, lkb, error);
2573 setup_stub_lkb(ls, ms);
2574 send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
2577 static void receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
2579 struct dlm_lkb *lkb;
2583 error = find_lkb(ls, ms->m_remid, &lkb);
2585 log_error(ls, "receive_grant no lkb");
2588 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
2590 r = lkb->lkb_resource;
2595 receive_flags_reply(lkb, ms);
2596 grant_lock_pc(r, lkb, ms);
2597 queue_cast(r, lkb, 0);
2604 static void receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
2606 struct dlm_lkb *lkb;
2610 error = find_lkb(ls, ms->m_remid, &lkb);
2612 log_error(ls, "receive_bast no lkb");
2615 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
2617 r = lkb->lkb_resource;
2622 queue_bast(r, lkb, ms->m_bastmode);
2629 static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
2631 int len, error, ret_nodeid, dir_nodeid, from_nodeid, our_nodeid;
2633 from_nodeid = ms->m_header.h_nodeid;
2634 our_nodeid = dlm_our_nodeid();
2636 len = receive_extralen(ms);
2638 dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
2639 if (dir_nodeid != our_nodeid) {
2640 log_error(ls, "lookup dir_nodeid %d from %d",
2641 dir_nodeid, from_nodeid);
2647 error = dlm_dir_lookup(ls, from_nodeid, ms->m_extra, len, &ret_nodeid);
2649 /* Optimization: we're master so treat lookup as a request */
2650 if (!error && ret_nodeid == our_nodeid) {
2651 receive_request(ls, ms);
2655 send_lookup_reply(ls, ms, ret_nodeid, error);
2658 static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
2660 int len, dir_nodeid, from_nodeid;
2662 from_nodeid = ms->m_header.h_nodeid;
2664 len = receive_extralen(ms);
2666 dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
2667 if (dir_nodeid != dlm_our_nodeid()) {
2668 log_error(ls, "remove dir entry dir_nodeid %d from %d",
2669 dir_nodeid, from_nodeid);
2673 dlm_dir_remove_entry(ls, from_nodeid, ms->m_extra, len);
2676 static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
2678 struct dlm_lkb *lkb;
2682 error = find_lkb(ls, ms->m_remid, &lkb);
2684 log_error(ls, "receive_request_reply no lkb");
2687 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
2689 mstype = lkb->lkb_wait_type;
2690 error = remove_from_waiters(lkb);
2692 log_error(ls, "receive_request_reply not on waiters");
2696 /* this is the value returned from do_request() on the master */
2697 error = ms->m_result;
2699 r = lkb->lkb_resource;
2703 /* Optimization: the dir node was also the master, so it took our
2704 lookup as a request and sent request reply instead of lookup reply */
2705 if (mstype == DLM_MSG_LOOKUP) {
2706 r->res_nodeid = ms->m_header.h_nodeid;
2707 lkb->lkb_nodeid = r->res_nodeid;
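/* the dir node turned out to also be the master; from here on this reply
   is handled just like an ordinary request reply */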
2712 /* request would block (be queued) on remote master;
2713 the unhold undoes the original ref from create_lkb()
2714 so it leads to the lkb being freed */
2715 queue_cast(r, lkb, -EAGAIN);
2716 confirm_master(r, -EAGAIN);
2722 /* request was queued or granted on remote master */
2723 receive_flags_reply(lkb, ms);
2724 lkb->lkb_remid = ms->m_lkid;
2726 add_lkb(r, lkb, DLM_LKSTS_WAITING);
2728 grant_lock_pc(r, lkb, ms);
2729 queue_cast(r, lkb, 0);
2731 confirm_master(r, error);
2736 /* find_rsb failed to find rsb or rsb wasn't master */
2738 lkb->lkb_nodeid = -1;
2739 _request_lock(r, lkb);
2743 log_error(ls, "receive_request_reply error %d", error);
2752 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
2753 struct dlm_message *ms)
2755 int error = ms->m_result;
2757 /* this is the value returned from do_convert() on the master */
2761 /* convert would block (be queued) on remote master */
2762 queue_cast(r, lkb, -EAGAIN);
2766 /* convert was queued on remote master */
2768 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
2772 /* convert was granted on remote master */
2773 receive_flags_reply(lkb, ms);
2774 grant_lock_pc(r, lkb, ms);
2775 queue_cast(r, lkb, 0);
2779 log_error(r->res_ls, "receive_convert_reply error %d", error);
2783 static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
2785 struct dlm_rsb *r = lkb->lkb_resource;
2790 __receive_convert_reply(r, lkb, ms);
2796 static void receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
2798 struct dlm_lkb *lkb;
2801 error = find_lkb(ls, ms->m_remid, &lkb);
2803 log_error(ls, "receive_convert_reply no lkb");
2806 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
2808 error = remove_from_waiters(lkb);
2810 log_error(ls, "receive_convert_reply not on waiters");
2814 _receive_convert_reply(lkb, ms);
2819 static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
2821 struct dlm_rsb *r = lkb->lkb_resource;
2822 int error = ms->m_result;
2827 /* this is the value returned from do_unlock() on the master */
2831 receive_flags_reply(lkb, ms);
2832 remove_lock_pc(r, lkb);
2833 queue_cast(r, lkb, -DLM_EUNLOCK);
2836 log_error(r->res_ls, "receive_unlock_reply error %d", error);
2843 static void receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
2845 struct dlm_lkb *lkb;
2848 error = find_lkb(ls, ms->m_remid, &lkb);
2850 log_error(ls, "receive_unlock_reply no lkb");
2853 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
2855 error = remove_from_waiters(lkb);
2857 log_error(ls, "receive_unlock_reply not on waiters");
2861 _receive_unlock_reply(lkb, ms);
2866 static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
2868 struct dlm_rsb *r = lkb->lkb_resource;
2869 int error = ms->m_result;
2874 /* this is the value returned from do_cancel() on the master */
2878 receive_flags_reply(lkb, ms);
2879 revert_lock_pc(r, lkb);
2880 queue_cast(r, lkb, -DLM_ECANCEL);
2883 log_error(r->res_ls, "receive_cancel_reply error %d", error);
2890 static void receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
2892 struct dlm_lkb *lkb;
2895 error = find_lkb(ls, ms->m_remid, &lkb);
2897 log_error(ls, "receive_cancel_reply no lkb");
2900 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
2902 error = remove_from_waiters(lkb);
2904 log_error(ls, "receive_cancel_reply not on waiters");
2908 _receive_cancel_reply(lkb, ms);
2913 static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
2915 struct dlm_lkb *lkb;
2917 int error, ret_nodeid;
2919 error = find_lkb(ls, ms->m_lkid, &lkb);
2921 log_error(ls, "receive_lookup_reply no lkb");
2925 error = remove_from_waiters(lkb);
2927 log_error(ls, "receive_lookup_reply not on waiters");
2931 /* this is the value returned by dlm_dir_lookup on dir node
2932 FIXME: will a non-zero error ever be returned? */
2933 error = ms->m_result;
2935 r = lkb->lkb_resource;
2939 ret_nodeid = ms->m_nodeid;
2940 if (ret_nodeid == dlm_our_nodeid()) {
2943 r->res_first_lkid = 0;
2945 /* set_master() will copy res_nodeid to lkb_nodeid */
2946 r->res_nodeid = ret_nodeid;
2949 _request_lock(r, lkb);
2952 process_lookup_list(r);
2960 int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
2962 struct dlm_message *ms = (struct dlm_message *) hd;
2969 ls = dlm_find_lockspace_global(hd->h_lockspace);
2971 log_print("drop message %d from %d for unknown lockspace %d",
2972 ms->m_type, nodeid, hd->h_lockspace);
2976 /* recovery may have just ended leaving a bunch of backed-up requests
2977 in the requestqueue; wait while dlm_recoverd clears them */
2980 dlm_wait_requestqueue(ls);
2982 /* recovery may have just started while there were a bunch of
2983 in-flight requests -- save them in requestqueue to be processed
2984 after recovery. we can't let dlm_recvd block on the recovery
2985 lock. if dlm_recoverd is calling this function to clear the
2986 requestqueue, it needs to be interrupted (-EINTR) if another
2987 recovery operation is starting. */
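/* in short: first drain any requestqueue backlog left from the last
   recovery, then either stash this message on the requestqueue (locking
   stopped again, or -EINTR for dlm_recoverd) or take the recovery lock
   and dispatch on ms->m_type below */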
2990 if (dlm_locking_stopped(ls)) {
2992 dlm_add_requestqueue(ls, nodeid, hd);
2997 if (lock_recovery_try(ls))
3002 switch (ms->m_type) {
3004 /* messages sent to a master node */
3006 case DLM_MSG_REQUEST:
3007 receive_request(ls, ms);
3010 case DLM_MSG_CONVERT:
3011 receive_convert(ls, ms);
3014 case DLM_MSG_UNLOCK:
3015 receive_unlock(ls, ms);
3018 case DLM_MSG_CANCEL:
3019 receive_cancel(ls, ms);
3022 /* messages sent from a master node (replies to above) */
3024 case DLM_MSG_REQUEST_REPLY:
3025 receive_request_reply(ls, ms);
3028 case DLM_MSG_CONVERT_REPLY:
3029 receive_convert_reply(ls, ms);
3032 case DLM_MSG_UNLOCK_REPLY:
3033 receive_unlock_reply(ls, ms);
3036 case DLM_MSG_CANCEL_REPLY:
3037 receive_cancel_reply(ls, ms);
3040 /* messages sent from a master node (only two types of async msg) */
3043 receive_grant(ls, ms);
3047 receive_bast(ls, ms);
3050 /* messages sent to a dir node */
3052 case DLM_MSG_LOOKUP:
3053 receive_lookup(ls, ms);
3056 case DLM_MSG_REMOVE:
3057 receive_remove(ls, ms);
3060 /* messages sent from a dir node (remove has no reply) */
3062 case DLM_MSG_LOOKUP_REPLY:
3063 receive_lookup_reply(ls, ms);
3067 log_error(ls, "unknown message type %d", ms->m_type);
3070 unlock_recovery(ls);
3072 dlm_put_lockspace(ls);
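/* Illustrative sketch only, not part of the DLM: a hypothetical delivery
   function showing how a received buffer would be handed to
   dlm_receive_message(). The real receive path (the comms layers) also
   does byte-order conversion and length validation, skipped here. */
#if 0
static void example_deliver_message(void *buf, int nodeid)
{
	struct dlm_header *hd = buf;

	/* ordinary (non-recoverd) callers pass recovery = 0 */
	dlm_receive_message(hd, nodeid, 0);
}
#endif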
3082 static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb)
3084 if (middle_conversion(lkb)) {
3086 ls->ls_stub_ms.m_result = -EINPROGRESS;
3087 _remove_from_waiters(lkb);
3088 _receive_convert_reply(lkb, &ls->ls_stub_ms);
3090 /* Same special case as in receive_rcom_lock_args() */
3091 lkb->lkb_grmode = DLM_LOCK_IV;
3092 rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
3095 } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
3096 lkb->lkb_flags |= DLM_IFL_RESEND;
3099 /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
3100 conversions are async; there's no reply from the remote master */
3103 /* A waiting lkb needs recovery if the master node has failed, or
3104 the master node is changing (only when no directory is used) */
3106 static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb)
3108 if (dlm_is_removed(ls, lkb->lkb_nodeid))
3111 if (!dlm_no_directory(ls))
3114 if (dlm_dir_nodeid(lkb->lkb_resource) != lkb->lkb_nodeid)
3120 /* Recovery for locks that are waiting for replies from nodes that are now
3121 gone. We can just complete unlocks and cancels by faking a reply from the
3122 dead node. Requests and up-conversions we flag to be resent after
3123 recovery. Down-conversions can just be completed with a fake reply like
3124 unlocks. Conversions between PR and CW need special attention. */
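/* Summary of the actions below, per lkb_wait_type (lookups are always
   resent; the rest only when waiter_needs_recovery() says so):
     DLM_MSG_LOOKUP  - flag RESEND
     DLM_MSG_REQUEST - flag RESEND
     DLM_MSG_CONVERT - recover_convert_waiter(): fake an -EINPROGRESS reply
                       for PR/CW middle conversions, flag up-conversions
                       RESEND, leave down-conversions alone (no reply is
                       expected for them)
     DLM_MSG_UNLOCK  - fake a -DLM_EUNLOCK reply
     DLM_MSG_CANCEL  - fake a -DLM_ECANCEL reply */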
3126 void dlm_recover_waiters_pre(struct dlm_ls *ls)
3128 struct dlm_lkb *lkb, *safe;
3130 mutex_lock(&ls->ls_waiters_mutex);
3132 list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
3133 log_debug(ls, "pre recover waiter lkid %x type %d flags %x",
3134 lkb->lkb_id, lkb->lkb_wait_type, lkb->lkb_flags);
3136 /* all outstanding lookups, regardless of destination, will be
3137 resent after recovery is done */
3139 if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
3140 lkb->lkb_flags |= DLM_IFL_RESEND;
3144 if (!waiter_needs_recovery(ls, lkb))
3147 switch (lkb->lkb_wait_type) {
3149 case DLM_MSG_REQUEST:
3150 lkb->lkb_flags |= DLM_IFL_RESEND;
3153 case DLM_MSG_CONVERT:
3154 recover_convert_waiter(ls, lkb);
3157 case DLM_MSG_UNLOCK:
3159 ls->ls_stub_ms.m_result = -DLM_EUNLOCK;
3160 _remove_from_waiters(lkb);
3161 _receive_unlock_reply(lkb, &ls->ls_stub_ms);
3165 case DLM_MSG_CANCEL:
3167 ls->ls_stub_ms.m_result = -DLM_ECANCEL;
3168 _remove_from_waiters(lkb);
3169 _receive_cancel_reply(lkb, &ls->ls_stub_ms);
3174 log_error(ls, "invalid lkb wait_type %d",
3175 lkb->lkb_wait_type);
3178 mutex_unlock(&ls->ls_waiters_mutex);
3181 static int remove_resend_waiter(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
3183 struct dlm_lkb *lkb;
3186 mutex_lock(&ls->ls_waiters_mutex);
3187 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
3188 if (lkb->lkb_flags & DLM_IFL_RESEND) {
3189 rv = lkb->lkb_wait_type;
3190 _remove_from_waiters(lkb);
3191 lkb->lkb_flags &= ~DLM_IFL_RESEND;
3195 mutex_unlock(&ls->ls_waiters_mutex);
3203 /* Deal with lookups and lkb's marked RESEND from _pre. We may now be the
3204 master or dir-node for r. Processing the lkb may result in it being placed back on the waiters list. */
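/* Each lkb returned by remove_resend_waiter() simply has its original
   operation re-driven: lookups and requests go back through _request_lock()
   (with confirm_master() for completed lookups), conversions go through
   _convert_lock(). */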
3207 int dlm_recover_waiters_post(struct dlm_ls *ls)
3209 struct dlm_lkb *lkb;
3211 int error = 0, mstype;
3214 if (dlm_locking_stopped(ls)) {
3215 log_debug(ls, "recover_waiters_post aborted");
3220 mstype = remove_resend_waiter(ls, &lkb);
3224 r = lkb->lkb_resource;
3226 log_debug(ls, "recover_waiters_post %x type %d flags %x %s",
3227 lkb->lkb_id, mstype, lkb->lkb_flags, r->res_name);
3231 case DLM_MSG_LOOKUP:
3234 _request_lock(r, lkb);
3236 confirm_master(r, 0);
3241 case DLM_MSG_REQUEST:
3244 _request_lock(r, lkb);
3249 case DLM_MSG_CONVERT:
3252 _convert_lock(r, lkb);
3258 log_error(ls, "recover_waiters_post type %d", mstype);
3265 static void purge_queue(struct dlm_rsb *r, struct list_head *queue,
3266 int (*test)(struct dlm_ls *ls, struct dlm_lkb *lkb))
3268 struct dlm_ls *ls = r->res_ls;
3269 struct dlm_lkb *lkb, *safe;
3271 list_for_each_entry_safe(lkb, safe, queue, lkb_statequeue) {
3272 if (test(ls, lkb)) {
3274 /* this put should free the lkb */
3276 log_error(ls, "purged lkb not released");
3281 static int purge_dead_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
3283 return (is_master_copy(lkb) && dlm_is_removed(ls, lkb->lkb_nodeid));
3286 static int purge_mstcpy_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
3288 return is_master_copy(lkb);
3291 static void purge_dead_locks(struct dlm_rsb *r)
3293 purge_queue(r, &r->res_grantqueue, &purge_dead_test);
3294 purge_queue(r, &r->res_convertqueue, &purge_dead_test);
3295 purge_queue(r, &r->res_waitqueue, &purge_dead_test);
3298 void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
3300 purge_queue(r, &r->res_grantqueue, &purge_mstcpy_test);
3301 purge_queue(r, &r->res_convertqueue, &purge_mstcpy_test);
3302 purge_queue(r, &r->res_waitqueue, &purge_mstcpy_test);
3305 /* Get rid of locks held by nodes that are gone. */
3307 int dlm_purge_locks(struct dlm_ls *ls)
3311 log_debug(ls, "dlm_purge_locks");
3313 down_write(&ls->ls_root_sem);
3314 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
3318 purge_dead_locks(r);
3324 up_write(&ls->ls_root_sem);
3329 int dlm_grant_after_purge(struct dlm_ls *ls)
3334 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
3335 read_lock(&ls->ls_rsbtbl[i].lock);
3336 list_for_each_entry(r, &ls->ls_rsbtbl[i].list, res_hashchain) {
3340 grant_pending_locks(r);
3341 confirm_master(r, 0);
3346 read_unlock(&ls->ls_rsbtbl[i].lock);
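/* Find an lkb from the given node with the given remote lkid on any of r's
   three queues; used by dlm_recover_master_copy() to spot locks already
   rebuilt by an earlier, aborted recovery pass. */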
3352 static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
3355 struct dlm_lkb *lkb;
3357 list_for_each_entry(lkb, head, lkb_statequeue) {
3358 if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
3364 static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
3367 struct dlm_lkb *lkb;
3369 lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
3372 lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
3375 lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
3381 static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3382 struct dlm_rsb *r, struct dlm_rcom *rc)
3384 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
3387 lkb->lkb_nodeid = rc->rc_header.h_nodeid;
3388 lkb->lkb_ownpid = rl->rl_ownpid;
3389 lkb->lkb_remid = rl->rl_lkid;
3390 lkb->lkb_exflags = rl->rl_exflags;
3391 lkb->lkb_flags = rl->rl_flags & 0x0000FFFF;
3392 lkb->lkb_flags |= DLM_IFL_MSTCPY;
3393 lkb->lkb_lvbseq = rl->rl_lvbseq;
3394 lkb->lkb_rqmode = rl->rl_rqmode;
3395 lkb->lkb_grmode = rl->rl_grmode;
3396 /* don't set lkb_status because add_lkb wants to set it itself */
3398 lkb->lkb_bastaddr = (void *) (long) (rl->rl_asts & AST_BAST);
3399 lkb->lkb_astaddr = (void *) (long) (rl->rl_asts & AST_COMP);
3401 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3402 lkb->lkb_lvbptr = allocate_lvb(ls);
3403 if (!lkb->lkb_lvbptr)
3405 lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
3406 sizeof(struct rcom_lock);
3407 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
3410 /* Conversions between PR and CW (middle modes) need special handling.
3411 The real granted mode of these converting locks cannot be determined
3412 until all locks have been rebuilt on the rsb (recover_conversion) */
3414 if (rl->rl_wait_type == DLM_MSG_CONVERT && middle_conversion(lkb)) {
3415 rl->rl_status = DLM_LKSTS_CONVERT;
3416 lkb->lkb_grmode = DLM_LOCK_IV;
3417 rsb_set_flag(r, RSB_RECOVER_CONVERT);
3423 /* This lkb may have been recovered in a previous aborted recovery so we need
3424 to check if the rsb already has an lkb with the given remote nodeid/lkid.
3425 If so we just send back a standard reply. If not, we create a new lkb with
3426 the given values and send back our lkid. We send back our lkid by sending
3427 back the rcom_lock struct we got but with the remid field filled in. */
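/* Rough picture of the exchange (names as used in this file):

     lock holder                         new master
     -----------                         ----------
     dlm_recover_locks()
       sends rcom_lock (rl_lkid) ----->  dlm_recover_master_copy()
                                         finds or creates the MSTCPY lkb,
                                         fills in rl_remid / rl_result
     dlm_recover_process_copy() <-----   same struct sent back
       saves rl_remid and acks via
       dlm_recovered_lock() */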
3429 int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
3431 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
3433 struct dlm_lkb *lkb;
3436 if (rl->rl_parent_lkid) {
3437 error = -EOPNOTSUPP;
3441 error = find_rsb(ls, rl->rl_name, rl->rl_namelen, R_MASTER, &r);
3447 lkb = search_remid(r, rc->rc_header.h_nodeid, rl->rl_lkid);
3453 error = create_lkb(ls, &lkb);
3457 error = receive_rcom_lock_args(ls, lkb, r, rc);
3464 add_lkb(r, lkb, rl->rl_status);
3468 /* this is the new value returned to the lock holder for
3469 saving in its process-copy lkb */
3470 rl->rl_remid = lkb->lkb_id;
3477 log_print("recover_master_copy %d %x", error, rl->rl_lkid);
3478 rl->rl_result = error;
3482 int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
3484 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
3486 struct dlm_lkb *lkb;
3489 error = find_lkb(ls, rl->rl_lkid, &lkb);
3491 log_error(ls, "recover_process_copy no lkid %x", rl->rl_lkid);
3495 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
3497 error = rl->rl_result;
3499 r = lkb->lkb_resource;
3505 log_debug(ls, "master copy exists %x", lkb->lkb_id);
3508 lkb->lkb_remid = rl->rl_remid;
3511 log_error(ls, "dlm_recover_process_copy unknown error %d %x",
3512 error, lkb->lkb_id);
3515 /* an ack for dlm_recover_locks() which waits for replies from
3516 all the locks it sends to new masters */
3517 dlm_recovered_lock(r);