/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#ifndef DLMCOMMON_H
#define DLMCOMMON_H

#include <linux/kref.h>

#define DLM_HB_NODE_DOWN_PRI		(0xf000000)
#define DLM_HB_NODE_UP_PRI		(0x8000000)

#define DLM_LOCKID_NAME_MAX		32

#define DLM_DOMAIN_NAME_MAX_LEN		255
#define DLM_LOCK_RES_OWNER_UNKNOWN	O2NM_MAX_NODES
#define DLM_THREAD_SHUFFLE_INTERVAL	5	// flush everything every 5 passes
#define DLM_THREAD_MS			200	// flush at least every 200 ms

#define DLM_HASH_SIZE_DEFAULT	(1 << 14)
#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
# define DLM_HASH_PAGES		1
#else
# define DLM_HASH_PAGES		(DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
#endif
#define DLM_BUCKETS_PER_PAGE	(PAGE_SIZE / sizeof(struct hlist_head))
#define DLM_HASH_BUCKETS	(DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)

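/*
 * Worked example of the sizing above (assuming 4K pages and an 8-byte
 * struct hlist_head, i.e. a 64-bit build): DLM_HASH_SIZE_DEFAULT is
 * 16384 bytes, so DLM_HASH_PAGES = 16384 / 4096 = 4,
 * DLM_BUCKETS_PER_PAGE = 4096 / 8 = 512, and
 * DLM_HASH_BUCKETS = 4 * 512 = 2048 buckets per domain.
 */
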
/* Intended to make it easier for us to switch out hash functions */
#define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)

#define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
			 LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
			 LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)

#define DLM_RECOVERY_LOCK_NAME		"$RECOVERY"
#define DLM_RECOVERY_LOCK_NAME_LEN	9

static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
{
	if (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
	    memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len) == 0)
		return 1;
	return 0;
}

#define DLM_RECO_STATE_ACTIVE	0x0001
#define DLM_RECO_STATE_FINALIZE	0x0002

struct dlm_recovery_ctxt
{
	struct list_head resources;
	struct list_head received;
	struct list_head node_data;
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	wait_queue_head_t event;
};

struct dlm_ctxt
{
	struct list_head list;
	struct hlist_head **lockres_hash;
	struct list_head dirty_list;
	struct list_head purge_list;
	struct list_head pending_asts;
	struct list_head pending_basts;
	unsigned int purge_count;
	spinlock_t spinlock;
	u8 joining_node;
	wait_queue_head_t dlm_join_events;
	unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct dlm_recovery_ctxt reco;
	spinlock_t master_lock;
	struct list_head master_list;
	struct list_head mle_hb_events;

	/* these give a really vague idea of the system load */
	atomic_t local_resources;
	atomic_t remote_resources;
	atomic_t unknown_resources;

	/* NOTE: Next three are protected by dlm_domain_lock */
	struct kref dlm_refs;
	enum dlm_ctxt_state dlm_state;
	unsigned int num_joins;

	struct o2hb_callback_func dlm_hb_up;
	struct o2hb_callback_func dlm_hb_down;
	struct task_struct *dlm_thread_task;
	struct task_struct *dlm_reco_thread_task;
	struct workqueue_struct *dlm_worker;
	wait_queue_head_t dlm_thread_wq;
	wait_queue_head_t dlm_reco_thread_wq;
	wait_queue_head_t ast_wq;
	wait_queue_head_t migration_wq;

	struct work_struct dispatched_work;
	struct list_head work_list;
	spinlock_t work_lock;
	struct list_head dlm_domain_handlers;
	struct list_head dlm_eviction_callbacks;
};

static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
{
	return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] +
	       (i % DLM_BUCKETS_PER_PAGE);
}

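/*
 * The lockres hash is an array of pages, each holding DLM_BUCKETS_PER_PAGE
 * hlist heads, so dlm_lockres_hash() splits a hash value into a page index
 * and a slot within that page. With the example layout above (512 buckets
 * per page, 4 pages), hash 600 selects page 600 / 512 = 1, slot
 * 600 % 512 = 88.
 */
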
/* these keventd work queue items are for less-frequently
 * called functions that cannot be directly called from the
 * net message handlers for some reason, usually because
 * they need to send net messages of their own. */
void dlm_dispatch_work(struct work_struct *work);

struct dlm_lock_resource;
struct dlm_work_item;

typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);

struct dlm_request_all_locks_priv

struct dlm_mig_lockres_priv
	struct dlm_lock_resource *lockres;

struct dlm_assert_master_priv
	struct dlm_lock_resource *lockres;
	unsigned ignore_higher:1;

struct dlm_work_item
{
	struct list_head list;
	dlm_workfunc_t *func;
	struct dlm_ctxt *dlm;
	void *data;
	struct dlm_request_all_locks_priv ral;
	struct dlm_mig_lockres_priv ml;
	struct dlm_assert_master_priv am;
};

static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
				      struct dlm_work_item *i,
				      dlm_workfunc_t *f, void *data)
{
	memset(i, 0, sizeof(*i));
	i->func = f;
	INIT_LIST_HEAD(&i->list);
	i->data = data;
	i->dlm = dlm;	/* must have already done a dlm_grab on this! */
}

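/*
 * Illustrative sketch (not part of the original header) of how a deferred
 * work item is typically driven: a message handler fills in a dlm_work_item,
 * parks it on dlm->work_list, and kicks dlm->dlm_worker so dlm_dispatch_work()
 * can run the function later in process context. Here my_deferred_func
 * stands in for any dlm_workfunc_t:
 *
 *	struct dlm_work_item *item = kzalloc(sizeof(*item), GFP_ATOMIC);
 *
 *	dlm_init_work_item(dlm, item, my_deferred_func, NULL);
 *	spin_lock(&dlm->work_lock);
 *	list_add_tail(&item->list, &dlm->work_list);
 *	spin_unlock(&dlm->work_lock);
 *	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
 */
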
static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
					  u8 node)
{
	assert_spin_locked(&dlm->spinlock);

	dlm->joining_node = node;
	wake_up(&dlm->dlm_join_events);
}

#define DLM_LOCK_RES_UNINITED		0x00000001
#define DLM_LOCK_RES_RECOVERING		0x00000002
#define DLM_LOCK_RES_READY		0x00000004
#define DLM_LOCK_RES_DIRTY		0x00000008
#define DLM_LOCK_RES_IN_PROGRESS	0x00000010
#define DLM_LOCK_RES_MIGRATING		0x00000020
#define DLM_LOCK_RES_DROPPING_REF	0x00000040
#define DLM_LOCK_RES_BLOCK_DIRTY	0x00001000

/* max milliseconds to wait to sync up a network failure with a node death */
#define DLM_NODE_DEATH_WAIT_MAX		(5 * 1000)

#define DLM_PURGE_INTERVAL_MS		(8 * 1000)

struct dlm_lock_resource
{
	/* WARNING: Please see the comment in dlm_init_lockres before
	 * adding fields here. */
	struct hlist_node hash_node;
	struct qstr lockname;
	struct kref refs;

	/*
	 * Please keep granted, converting, and blocked in this order,
	 * as some funcs want to iterate over all lists.
	 *
	 * All four lists are protected by the hash's reference.
	 */
	struct list_head granted;
	struct list_head converting;
	struct list_head blocked;
	struct list_head purge;

	/*
	 * These two lists require you to hold an additional reference
	 * while they are on the list.
	 */
	struct list_head dirty;
	struct list_head recovering; // dlm_recovery_ctxt.resources list

	/* unused lock resources have their last_used stamped and are
	 * put on a list for the dlm thread to run. */
	unsigned long last_used;

	unsigned migration_pending:1;
	atomic_t asts_reserved;
	spinlock_t spinlock;
	wait_queue_head_t wq;
	u8 owner;              //node which owns the lock resource, or unknown
	u16 state;
	char lvb[DLM_LVB_LEN];
	unsigned int inflight_locks;
	unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

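/*
 * refmap holds one bit per node (set and cleared via the
 * dlm_lockres_set_refmap_bit()/dlm_lockres_clear_refmap_bit() helpers
 * below); together with inflight_locks it tracks which nodes may still
 * reference this lock resource, which is what keeps an unused resource
 * from being purged prematurely.
 */
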
struct dlm_migratable_lock
	/* these 3 are just padding for the in-memory structure, but
	 * list and flags are actually used when sent over the wire */
	u8 list;	// 0=granted, 1=converting, 2=blocked

struct dlm_lock
{
	struct dlm_migratable_lock ml;

	struct list_head list;
	struct list_head ast_list;
	struct list_head bast_list;
	struct dlm_lock_resource *lockres;
	struct kref lock_refs;

	// ast and bast must be callable while holding a spinlock!
	dlm_astlockfunc_t *ast;
	dlm_bastlockfunc_t *bast;

	struct dlm_lockstatus *lksb;
	unsigned ast_pending:1,
		 lksb_kernel_allocated:1;
};

#define DLM_LKSB_UNUSED1	0x01
#define DLM_LKSB_PUT_LVB	0x02
#define DLM_LKSB_GET_LVB	0x04
#define DLM_LKSB_UNUSED2	0x08
#define DLM_LKSB_UNUSED3	0x10
#define DLM_LKSB_UNUSED4	0x20
#define DLM_LKSB_UNUSED5	0x40
#define DLM_LKSB_UNUSED6	0x80

enum dlm_lockres_list {
	DLM_GRANTED_LIST = 0,
	DLM_CONVERTING_LIST,
	DLM_BLOCKED_LIST
};

static inline int dlm_lvb_is_empty(char *lvb)
{
	int i;

	for (i = 0; i < DLM_LVB_LEN; i++)
		if (lvb[i])
			return 0;
	return 1;
}

static inline struct list_head *
dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
{
	struct list_head *ret = NULL;

	if (idx == DLM_GRANTED_LIST)
		ret = &res->granted;
	else if (idx == DLM_CONVERTING_LIST)
		ret = &res->converting;
	else if (idx == DLM_BLOCKED_LIST)
		ret = &res->blocked;

	return ret;
}

struct dlm_node_iter
{
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int curnode;
};

enum {
	DLM_MASTER_REQUEST_MSG		= 500,
	DLM_UNUSED_MSG1,		/* 501 */
	DLM_ASSERT_MASTER_MSG,		/* 502 */
	DLM_CREATE_LOCK_MSG,		/* 503 */
	DLM_CONVERT_LOCK_MSG,		/* 504 */
	DLM_PROXY_AST_MSG,		/* 505 */
	DLM_UNLOCK_LOCK_MSG,		/* 506 */
	DLM_DEREF_LOCKRES_MSG,		/* 507 */
	DLM_MIGRATE_REQUEST_MSG,	/* 508 */
	DLM_MIG_LOCKRES_MSG,		/* 509 */
	DLM_QUERY_JOIN_MSG,		/* 510 */
	DLM_ASSERT_JOINED_MSG,		/* 511 */
	DLM_CANCEL_JOIN_MSG,		/* 512 */
	DLM_EXIT_DOMAIN_MSG,		/* 513 */
	DLM_MASTER_REQUERY_MSG,		/* 514 */
	DLM_LOCK_REQUEST_MSG,		/* 515 */
	DLM_RECO_DATA_DONE_MSG,		/* 516 */
	DLM_BEGIN_RECO_MSG,		/* 517 */
	DLM_FINALIZE_RECO_MSG		/* 518 */
};

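/*
 * These are the o2net message types a dlm domain registers handlers for;
 * the numbers in the trailing comments are the values used on the wire
 * (see the dlm_*_handler() prototypes later in this header).
 */
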
struct dlm_reco_node_data
	struct list_head list;

enum {
	DLM_RECO_NODE_DATA_DEAD = -1,
	DLM_RECO_NODE_DATA_INIT = 0,
	DLM_RECO_NODE_DATA_REQUESTING,
	DLM_RECO_NODE_DATA_REQUESTED,
	DLM_RECO_NODE_DATA_RECEIVING,
	DLM_RECO_NODE_DATA_DONE,
	DLM_RECO_NODE_DATA_FINALIZE_SENT,
};

enum {
	DLM_MASTER_RESP_NO = 0,
	DLM_MASTER_RESP_YES,
	DLM_MASTER_RESP_MAYBE,
	DLM_MASTER_RESP_ERROR
};

struct dlm_master_request
	u8 name[O2NM_MAX_NAME_LEN];

#define DLM_ASSERT_RESPONSE_REASSERT		0x00000001
#define DLM_ASSERT_RESPONSE_MASTERY_REF		0x00000002

#define DLM_ASSERT_MASTER_MLE_CLEANUP		0x00000001
#define DLM_ASSERT_MASTER_REQUERY		0x00000002
#define DLM_ASSERT_MASTER_FINISH_MIGRATION	0x00000004
struct dlm_assert_master
	u8 name[O2NM_MAX_NAME_LEN];

#define DLM_MIGRATE_RESPONSE_MASTERY_REF	0x00000001

struct dlm_migrate_request
	u8 name[O2NM_MAX_NAME_LEN];

struct dlm_master_requery
	u8 name[O2NM_MAX_NAME_LEN];

#define DLM_MRES_RECOVERY	0x01
#define DLM_MRES_MIGRATION	0x02
#define DLM_MRES_ALL_DONE	0x04

/*
 * We would like to get one whole lockres into a single network
 * message whenever possible. Generally speaking, there will be
 * at most one dlm_lock on a lockres for each node in the cluster,
 * plus (infrequently) any additional locks coming in from userdlm.
 *
 * struct _dlm_lockres_page
 * {
 *	dlm_migratable_lockres mres;
 *	dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS];
 *	u8 pad[DLM_MIG_LOCKRES_RESERVED];
 * };
 *
 * from ../cluster/tcp.h
 *	NET_MAX_PAYLOAD_BYTES (4096 - sizeof(net_msg))
 *	(roughly 4080 bytes)
 * and sizeof(dlm_migratable_lockres) = 112 bytes
 * and sizeof(dlm_migratable_lock) = 16 bytes
 *
 * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and
 * DLM_MIG_LOCKRES_RESERVED=128 means we have this:
 *
 *	(DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) +
 *	sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED =
 *		NET_MAX_PAYLOAD_BYTES
 *	(240 * 16) + 112 + 128 = 4080
 *
 * So a lockres would need more than 240 locks before it would
 * use more than one network packet to recover. Not too bad.
 */
#define DLM_MAX_MIGRATABLE_LOCKS	240

struct dlm_migratable_lockres
	u8 num_locks;		// locks sent in this structure
	__be32 total_locks;	// locks to be sent for this migration cookie
	__be64 mig_cookie;	// cookie for this lockres migration
				// or zero if not needed
	u8 lockname[DLM_LOCKID_NAME_MAX];
	struct dlm_migratable_lock ml[0];	// 16 bytes each, begins at byte 112

#define DLM_MIG_LOCKRES_MAX_LEN  \
	(sizeof(struct dlm_migratable_lockres) + \
	 (sizeof(struct dlm_migratable_lock) * \
	  DLM_MAX_MIGRATABLE_LOCKS) )

/* from above, 128 bytes
 * for some undetermined future use */
#define DLM_MIG_LOCKRES_RESERVED	(NET_MAX_PAYLOAD_BYTES - \
					 DLM_MIG_LOCKRES_MAX_LEN)

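/*
 * A build-time sanity check of the sizing math above could look roughly
 * like the following (an illustrative sketch, not part of the original
 * header):
 *
 *	BUILD_BUG_ON(sizeof(struct dlm_migratable_lock) != 16);
 *	BUILD_BUG_ON(sizeof(struct dlm_migratable_lockres) != 112);
 *
 * With 240 locks of 16 bytes each, a 112-byte header and 128 reserved
 * bytes, the total is (240 * 16) + 112 + 128 = 4080 bytes, i.e. one
 * network payload.
 */
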
struct dlm_create_lock
	u8 name[O2NM_MAX_NAME_LEN];

struct dlm_convert_lock
	u8 name[O2NM_MAX_NAME_LEN];

#define DLM_CONVERT_LOCK_MAX_LEN  (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN)

struct dlm_unlock_lock
	u8 name[O2NM_MAX_NAME_LEN];

#define DLM_UNLOCK_LOCK_MAX_LEN  (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN)

struct dlm_proxy_ast
	u8 name[O2NM_MAX_NAME_LEN];

#define DLM_PROXY_AST_MAX_LEN  (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)

#define DLM_MOD_KEY (0x666c6172)
enum dlm_query_join_response {
	JOIN_DISALLOW = 0,
	JOIN_OK,
	JOIN_OK_NO_MAP,
};

struct dlm_lock_request

struct dlm_reco_data_done
	/* eventually we can use this to attempt
	 * lvb recovery based on each node's info */
	u8 reco_lvb[DLM_LVB_LEN];

struct dlm_begin_reco

struct dlm_query_join_request
	u8 domain[O2NM_MAX_NAME_LEN];

struct dlm_assert_joined
	u8 domain[O2NM_MAX_NAME_LEN];

struct dlm_cancel_join
	u8 domain[O2NM_MAX_NAME_LEN];

struct dlm_exit_domain

struct dlm_finalize_reco

struct dlm_deref_lockres
	u8 name[O2NM_MAX_NAME_LEN];

static inline enum dlm_status
__dlm_lockres_state_to_status(struct dlm_lock_resource *res)
{
	enum dlm_status status = DLM_NORMAL;

	assert_spin_locked(&res->spinlock);

	if (res->state & DLM_LOCK_RES_RECOVERING)
		status = DLM_RECOVERING;
	else if (res->state & DLM_LOCK_RES_MIGRATING)
		status = DLM_MIGRATING;
	else if (res->state & DLM_LOCK_RES_IN_PROGRESS)
		status = DLM_FORWARD;

	return status;
}

static inline u8 dlm_get_lock_cookie_node(u64 cookie)
{
	u8 ret = 0;
	ret = (u8)(cookie >> 56);
	return ret;
}

static inline unsigned long long dlm_get_lock_cookie_seq(u64 cookie)
{
	unsigned long long ret;
	ret = ((unsigned long long)cookie) & 0x00ffffffffffffffULL;
	return ret;
}

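/*
 * As the two helpers above imply, a lock cookie packs the owning node
 * number into the top 8 bits and a per-node sequence number into the low
 * 56 bits, roughly (illustrative sketch, not from the original header):
 *
 *	u64 cookie = ((u64)node << 56) | (seq & 0x00ffffffffffffffULL);
 */
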
struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
			       struct dlm_lockstatus *lksb);
void dlm_lock_get(struct dlm_lock *lock);
void dlm_lock_put(struct dlm_lock *lock);

void dlm_lock_attach_lockres(struct dlm_lock *lock,
			     struct dlm_lock_resource *res);

int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			     void **ret_data);
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
			  void **ret_data);

void dlm_revert_pending_convert(struct dlm_lock_resource *res,
				struct dlm_lock *lock);
void dlm_revert_pending_lock(struct dlm_lock_resource *res,
			     struct dlm_lock *lock);

int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);
void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);

int dlm_launch_thread(struct dlm_ctxt *dlm);
void dlm_complete_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);

void dlm_put(struct dlm_ctxt *dlm);
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
int dlm_domain_fully_joined(struct dlm_ctxt *dlm);

void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res);
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res);

static inline void dlm_lockres_get(struct dlm_lock_resource *res)
{
	/* This is called on every lookup, so it might be worth
	 * optimizing. */
	kref_get(&res->refs);
}

void dlm_lockres_put(struct dlm_lock_resource *res);
void __dlm_unhash_lockres(struct dlm_lock_resource *res);
void __dlm_insert_lockres(struct dlm_ctxt *dlm,
			  struct dlm_lock_resource *res);
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
						     const char *name,
						     unsigned int len,
						     unsigned int hash);
struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
						const char *name,
						unsigned int len,
						unsigned int hash);
struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
					      const char *name,
					      unsigned int len);

int dlm_is_host_down(int errno);
void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res,
			      u8 owner);
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int namelen,
						 int flags);
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen);

#define dlm_lockres_set_refmap_bit(bit,res)  \
	__dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__)
#define dlm_lockres_clear_refmap_bit(bit,res)  \
	__dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__)

static inline void __dlm_lockres_set_refmap_bit(int bit,
						struct dlm_lock_resource *res,
						const char *file,
						int line)
{
	//printk("%s:%d:%.*s: setting bit %d\n", file, line,
	//     res->lockname.len, res->lockname.name, bit);
	set_bit(bit, res->refmap);
}

static inline void __dlm_lockres_clear_refmap_bit(int bit,
						  struct dlm_lock_resource *res,
						  const char *file,
						  int line)
{
	//printk("%s:%d:%.*s: clearing bit %d\n", file, line,
	//     res->lockname.len, res->lockname.name, bit);
	clear_bit(bit, res->refmap);
}

void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     const char *file,
				     int line);
void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     int new_lockres,
				     const char *file,
				     int line);
#define dlm_lockres_drop_inflight_ref(d,r)  \
	__dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__)
#define dlm_lockres_grab_inflight_ref(d,r)  \
	__dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__)
#define dlm_lockres_grab_inflight_ref_new(d,r)  \
	__dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__)

void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_do_local_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
int dlm_do_remote_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
void dlm_do_local_bast(struct dlm_ctxt *dlm,
		       struct dlm_lock_resource *res,
		       struct dlm_lock *lock,
		       int blocked_type);
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm,
			   struct dlm_lock_resource *res,
			   struct dlm_lock *lock,
			   int msg_type,
			   int blocked_type, int flags);
static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock,
				      int blocked_type)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST,
				      blocked_type, 0);
}

static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_lock *lock,
				     int flags)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST,
				      0, flags);
}

void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);

u8 dlm_nm_this_node(struct dlm_ctxt *dlm);
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);

int dlm_nm_init(struct dlm_ctxt *dlm);
int dlm_heartbeat_init(struct dlm_ctxt *dlm);
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);

int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);

int dlm_finish_migration(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 u8 old_master);
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res);
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);

int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
				void **ret_data);
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data);
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data);
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master);

int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher,
			       u8 request_from,
			       u32 flags);
int dlm_send_one_lockres(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to,
			 u8 flags);
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);

/* will exit holding res->spinlock, but may drop in function */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);
void __dlm_wait_on_lockres_flags_set(struct dlm_lock_resource *res, int flags);

/* will exit holding res->spinlock, but may drop in function */
static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
{
	__dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS|
					  DLM_LOCK_RES_RECOVERING|
					  DLM_LOCK_RES_MIGRATING));
}

int dlm_init_mle_cache(void);
void dlm_destroy_mle_cache(void);
void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res);
void dlm_clean_master_list(struct dlm_ctxt *dlm,
			   u8 dead_node);
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
int __dlm_lockres_unused(struct dlm_lock_resource *res);

static inline const char * dlm_lock_mode_name(int mode)
{
	switch (mode) {
		case LKM_EXMODE:
			return "EX";
		case LKM_PRMODE:
			return "PR";
		case LKM_NLMODE:
			return "NL";
	}
	return "UNKNOWN";
}

static inline int dlm_lock_compatible(int existing, int request)
{
	/* NO_LOCK compatible with all */
	if (request == LKM_NLMODE ||
	    existing == LKM_NLMODE)
		return 1;

	/* EX incompatible with all non-NO_LOCK */
	if (request == LKM_EXMODE ||
	    existing == LKM_EXMODE)
		return 0;

	/* request must be PR, which is compatible with PR */
	if (existing == LKM_PRMODE)
		return 1;

	return 0;
}

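/*
 * Equivalent compatibility matrix (1 = compatible), derived from the
 * checks above:
 *
 *			existing
 *			NL	PR	EX
 *	request	NL	1	1	1
 *		PR	1	1	0
 *		EX	1	0	0
 */
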
static inline int dlm_lock_on_list(struct list_head *head,
				   struct dlm_lock *lock)
{
	struct list_head *iter;
	struct dlm_lock *tmplock;

	list_for_each(iter, head) {
		tmplock = list_entry(iter, struct dlm_lock, list);
		if (tmplock == lock)
			return 1;
	}
	return 0;
}

static inline enum dlm_status dlm_err_to_dlm_status(int err)
{
	enum dlm_status ret;
	if (err == -ENOMEM)
		ret = DLM_SYSERR;
	else if (err == -ETIMEDOUT || o2net_link_down(err, NULL))
		ret = DLM_NOLOCKMGR;
	else if (err == -EINVAL)
		ret = DLM_BADPARAM;
	else if (err == -ENAMETOOLONG)
		ret = DLM_IVBUFLEN;
	else
		ret = DLM_BADARGS;
	return ret;
}

static inline void dlm_node_iter_init(unsigned long *map,
				      struct dlm_node_iter *iter)
{
	memcpy(iter->node_map, map, sizeof(iter->node_map));
	iter->curnode = -1;
}

static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
{
	int bit;
	bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -1;
	}
	iter->curnode = bit;
	return bit;
}

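/*
 * Typical usage of the node iterator (an illustrative sketch; callers
 * normally snapshot the map of interest under dlm->spinlock first):
 *
 *	struct dlm_node_iter iter;
 *	int node;
 *
 *	dlm_node_iter_init(dlm->domain_map, &iter);
 *	while ((node = dlm_node_iter_next(&iter)) >= 0) {
 *		// ... talk to 'node' ...
 *	}
 */
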
#endif /* DLMCOMMON_H */