1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * Code which implements an OCFS2 specific interface to our DLM.
8 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
26 #include <linux/types.h>
27 #include <linux/slab.h>
28 #include <linux/highmem.h>
30 #include <linux/crc32.h>
31 #include <linux/kthread.h>
32 #include <linux/pagemap.h>
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
36 #include <cluster/heartbeat.h>
37 #include <cluster/nodemanager.h>
38 #include <cluster/tcp.h>
40 #include <dlm/dlmapi.h>
42 #define MLOG_MASK_PREFIX ML_DLM_GLUE
43 #include <cluster/masklog.h>
46 #include "ocfs2_lockingver.h"
51 #include "extent_map.h"
53 #include "heartbeat.h"
60 #include "buffer_head_io.h"
62 struct ocfs2_mask_waiter {
63 struct list_head mw_item;
65 struct completion mw_complete;
66 unsigned long mw_mask;
67 unsigned long mw_goal;
70 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
71 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
72 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
75 * Return value from ->downconvert_worker functions.
77 * These control the precise actions of ocfs2_unblock_lock()
78 * and ocfs2_process_blocked_lock()
81 enum ocfs2_unblock_action {
82 UNBLOCK_CONTINUE = 0, /* Continue downconvert */
83 UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire
84 * ->post_unlock callback */
85 UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire
86 * ->post_unlock() callback. */
89 struct ocfs2_unblock_ctl {
91 enum ocfs2_unblock_action unblock_action;
94 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
96 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
98 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
101 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
104 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
105 struct ocfs2_lock_res *lockres);
108 #define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
110 /* This aids in debugging situations where a bad LVB might be involved. */
111 static void ocfs2_dump_meta_lvb_info(u64 level,
112 const char *function,
114 struct ocfs2_lock_res *lockres)
116 struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
118 mlog(level, "LVB information for %s (called from %s:%u):\n",
119 lockres->l_name, function, line);
120 mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
121 lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
122 be32_to_cpu(lvb->lvb_igeneration));
123 mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
124 (unsigned long long)be64_to_cpu(lvb->lvb_isize),
125 be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
126 be16_to_cpu(lvb->lvb_imode));
127 mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
128 "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
129 (long long)be64_to_cpu(lvb->lvb_iatime_packed),
130 (long long)be64_to_cpu(lvb->lvb_ictime_packed),
131 (long long)be64_to_cpu(lvb->lvb_imtime_packed),
132 be32_to_cpu(lvb->lvb_iattr));
137 * OCFS2 Lock Resource Operations
139 * These fine tune the behavior of the generic dlmglue locking infrastructure.
141 * The most basic of lock types can point ->l_priv to their respective
142 * struct ocfs2_super and allow the default actions to manage things.
144 * Right now, each lock type also needs to implement an init function,
145 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
146 * should be called when the lock is no longer needed (i.e., object
149 struct ocfs2_lock_res_ops {
151 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
152 * this callback if ->l_priv is not an ocfs2_super pointer
154 struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
157 * Optionally called in the downconvert thread after a
158 * successful downconvert. The lockres will not be referenced
159 * after this callback is called, so it is safe to free
162 * The exact semantics of when this is called are controlled
163 * by ->downconvert_worker()
165 void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
168 * Allow a lock type to add checks to determine whether it is
169 * safe to downconvert a lock. Return 0 to re-queue the
170 * downconvert at a later time, nonzero to continue.
172 * For most locks, the default checks that there are no
173 * incompatible holders are sufficient.
175 * Called with the lockres spinlock held.
177 int (*check_downconvert)(struct ocfs2_lock_res *, int);
180 * Allows a lock type to populate the lock value block. This
181 * is called on downconvert, and when we drop a lock.
183 * Locks that want to use this should set LOCK_TYPE_USES_LVB
184 * in the flags field.
186 * Called with the lockres spinlock held.
188 void (*set_lvb)(struct ocfs2_lock_res *);
191 * Called from the downconvert thread when it is determined
192 * that a lock will be downconverted. This is called without
193 * any locks held so the function can do work that might
194 * schedule (syncing out data, etc).
196 * This should return any one of the ocfs2_unblock_action
197 * values, depending on what it wants the thread to do.
199 int (*downconvert_worker)(struct ocfs2_lock_res *, int);
202 * LOCK_TYPE_* flags which describe the specific requirements
203 * of a lock type. Descriptions of each individual flag follow.
209 * Some locks want to "refresh" potentially stale data when a
210 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
211 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
212 * individual lockres l_flags member from the ast function. It is
213 * expected that the locking wrapper will clear the
214 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
216 #define LOCK_TYPE_REQUIRES_REFRESH 0x1
219 * Indicate that a lock type makes use of the lock value block. The
220 * ->set_lvb lock type callback must be defined.
222 #define LOCK_TYPE_USES_LVB 0x2
224 static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
225 .get_osb = ocfs2_get_inode_osb,
229 static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
230 .get_osb = ocfs2_get_inode_osb,
231 .check_downconvert = ocfs2_check_meta_downconvert,
232 .set_lvb = ocfs2_set_meta_lvb,
233 .downconvert_worker = ocfs2_data_convert_worker,
234 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
237 static struct ocfs2_lock_res_ops ocfs2_super_lops = {
238 .flags = LOCK_TYPE_REQUIRES_REFRESH,
241 static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
245 static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
246 .get_osb = ocfs2_get_dentry_osb,
247 .post_unlock = ocfs2_dentry_post_unlock,
248 .downconvert_worker = ocfs2_dentry_convert_worker,
252 static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
253 .get_osb = ocfs2_get_inode_osb,
257 static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
258 .get_osb = ocfs2_get_file_osb,
263 * This is the filesystem locking protocol version.
265 * Whenever the filesystem does new things with locks (adds or removes a
266 * lock, orders them differently, does different things underneath a lock),
267 * the version must be changed. The protocol is negotiated when joining
268 * the dlm domain. A node may join the domain if its major version is
269 * identical to all other nodes and its minor version is greater than
270 * or equal to all other nodes. When its minor version is greater than
271 * the other nodes, it will run at the minor version specified by the
274 * If a locking change is made that will not be compatible with older
275 * versions, the major number must be increased and the minor version set
276 * to zero. If a change merely adds a behavior that can be disabled when
277 * speaking to older versions, the minor version must be increased. If a
278 * change is fully backwards compatible (e.g., LVB changes that
279 * are just ignored by older versions), the version does not need to be
282 const struct dlm_protocol_version ocfs2_locking_protocol = {
283 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
284 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
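/*
 * Illustrative reading of the negotiation rule above (not from the
 * original source): a node built for protocol 1.1 may join a cluster of
 * 1.0 nodes and will then speak 1.0; a 1.0 node may not join a cluster
 * whose other members are already at 1.1; and a 2.x node can never join
 * a 1.x cluster because the major versions differ.
 */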
287 static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
289 return lockres->l_type == OCFS2_LOCK_TYPE_META ||
290 lockres->l_type == OCFS2_LOCK_TYPE_RW ||
291 lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
294 static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
296 BUG_ON(!ocfs2_is_inode_lock(lockres));
298 return (struct inode *) lockres->l_priv;
301 static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
303 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);
305 return (struct ocfs2_dentry_lock *)lockres->l_priv;
308 static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
310 if (lockres->l_ops->get_osb)
311 return lockres->l_ops->get_osb(lockres);
313 return (struct ocfs2_super *)lockres->l_priv;
316 static int ocfs2_lock_create(struct ocfs2_super *osb,
317 struct ocfs2_lock_res *lockres,
320 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
322 static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
323 struct ocfs2_lock_res *lockres,
325 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
326 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
327 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
328 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
329 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
330 struct ocfs2_lock_res *lockres);
331 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
333 #define ocfs2_log_dlm_error(_func, _stat, _lockres) do { \
334 mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \
335 "resource %s: %s\n", dlm_errname(_stat), _func, \
336 _lockres->l_name, dlm_errmsg(_stat)); \
338 static int ocfs2_downconvert_thread(void *arg);
339 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
340 struct ocfs2_lock_res *lockres);
341 static int ocfs2_inode_lock_update(struct inode *inode,
342 struct buffer_head **bh);
343 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
344 static inline int ocfs2_highest_compat_lock_level(int level);
345 static void ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
347 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
348 struct ocfs2_lock_res *lockres,
351 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
352 struct ocfs2_lock_res *lockres);
353 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
354 struct ocfs2_lock_res *lockres);
357 static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
366 BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
368 len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
369 ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
370 (long long)blkno, generation);
372 BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
374 mlog(0, "built lock resource with name: %s\n", name);
379 static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
381 static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
382 struct ocfs2_dlm_debug *dlm_debug)
384 mlog(0, "Add tracking for lockres %s\n", res->l_name);
386 spin_lock(&ocfs2_dlm_tracking_lock);
387 list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
388 spin_unlock(&ocfs2_dlm_tracking_lock);
391 static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
393 spin_lock(&ocfs2_dlm_tracking_lock);
394 if (!list_empty(&res->l_debug_list))
395 list_del_init(&res->l_debug_list);
396 spin_unlock(&ocfs2_dlm_tracking_lock);
399 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
400 struct ocfs2_lock_res *res,
401 enum ocfs2_lock_type type,
402 struct ocfs2_lock_res_ops *ops,
409 res->l_level = LKM_IVMODE;
410 res->l_requested = LKM_IVMODE;
411 res->l_blocking = LKM_IVMODE;
412 res->l_action = OCFS2_AST_INVALID;
413 res->l_unlock_action = OCFS2_UNLOCK_INVALID;
415 res->l_flags = OCFS2_LOCK_INITIALIZED;
417 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
420 void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
422 /* This also clears out the lock status block */
423 memset(res, 0, sizeof(struct ocfs2_lock_res));
424 spin_lock_init(&res->l_lock);
425 init_waitqueue_head(&res->l_event);
426 INIT_LIST_HEAD(&res->l_blocked_list);
427 INIT_LIST_HEAD(&res->l_mask_waiters);
430 void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
431 enum ocfs2_lock_type type,
432 unsigned int generation,
435 struct ocfs2_lock_res_ops *ops;
438 case OCFS2_LOCK_TYPE_RW:
439 ops = &ocfs2_inode_rw_lops;
441 case OCFS2_LOCK_TYPE_META:
442 ops = &ocfs2_inode_inode_lops;
444 case OCFS2_LOCK_TYPE_OPEN:
445 ops = &ocfs2_inode_open_lops;
448 mlog_bug_on_msg(1, "type: %d\n", type);
449 ops = NULL; /* thanks, gcc */
453 ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
454 generation, res->l_name);
455 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
458 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
460 struct inode *inode = ocfs2_lock_res_inode(lockres);
462 return OCFS2_SB(inode->i_sb);
465 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
467 struct ocfs2_file_private *fp = lockres->l_priv;
469 return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
472 static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
474 __be64 inode_blkno_be;
476 memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
479 return be64_to_cpu(inode_blkno_be);
482 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
484 struct ocfs2_dentry_lock *dl = lockres->l_priv;
486 return OCFS2_SB(dl->dl_inode->i_sb);
489 void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
490 u64 parent, struct inode *inode)
493 u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
494 __be64 inode_blkno_be = cpu_to_be64(inode_blkno);
495 struct ocfs2_lock_res *lockres = &dl->dl_lockres;
497 ocfs2_lock_res_init_once(lockres);
500 * Unfortunately, the standard lock naming scheme won't work
501 * here because we have two 16 byte values to use. Instead,
502 * we'll stuff the inode number as a binary value. We still
503 * want error prints to show something without garbling the
504 * display, so drop a null byte in there before the inode
505 * number. A future version of OCFS2 will likely use all
506 * binary lock names. The stringified names have been a
507 * tremendous aid in debugging, but now that the debugfs
508 * interface exists, we can mangle things there if need be.
510 * NOTE: We also drop the standard "pad" value (the total lock
511 * name size stays the same though - the last part is all
512 * zeros due to the memset in ocfs2_lock_res_init_once()
514 len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
516 ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
519 BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
521 memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
524 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
525 OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
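/*
 * Resulting dentry lock name layout (a reading of the code above,
 * illustrative only; the 'N' type character and the hex field width are
 * assumptions):
 *
 *     byte  0        'N' (dentry lock type character)
 *     bytes 1..16    parent directory block number as hex digits
 *     byte  17       '\0' terminator left by the snprintf()
 *     bytes 18..25   inode block number as a raw big-endian u64
 *     the rest       zeros from the memset in ocfs2_lock_res_init_once()
 */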
529 static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
530 struct ocfs2_super *osb)
532 /* Superblock lockres doesn't come from a slab so we call init
533 * once on it manually. */
534 ocfs2_lock_res_init_once(res);
535 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
537 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
538 &ocfs2_super_lops, osb);
541 static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
542 struct ocfs2_super *osb)
544 /* Rename lockres doesn't come from a slab so we call init
545 * once on it manually. */
546 ocfs2_lock_res_init_once(res);
547 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
548 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
549 &ocfs2_rename_lops, osb);
552 void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
553 struct ocfs2_file_private *fp)
555 struct inode *inode = fp->fp_file->f_mapping->host;
556 struct ocfs2_inode_info *oi = OCFS2_I(inode);
558 ocfs2_lock_res_init_once(lockres);
559 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
560 inode->i_generation, lockres->l_name);
561 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
562 OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
564 lockres->l_flags |= OCFS2_LOCK_NOCACHE;
567 void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
571 if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
574 ocfs2_remove_lockres_tracking(res);
576 mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
577 "Lockres %s is on the blocked list\n",
579 mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
580 "Lockres %s has mask waiters pending\n",
582 mlog_bug_on_msg(spin_is_locked(&res->l_lock),
583 "Lockres %s is locked\n",
585 mlog_bug_on_msg(res->l_ro_holders,
586 "Lockres %s has %u ro holders\n",
587 res->l_name, res->l_ro_holders);
588 mlog_bug_on_msg(res->l_ex_holders,
589 "Lockres %s has %u ex holders\n",
590 res->l_name, res->l_ex_holders);
592 /* Need to clear out the lock status block for the dlm */
593 memset(&res->l_lksb, 0, sizeof(res->l_lksb));
599 static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
608 lockres->l_ex_holders++;
611 lockres->l_ro_holders++;
620 static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
629 BUG_ON(!lockres->l_ex_holders);
630 lockres->l_ex_holders--;
633 BUG_ON(!lockres->l_ro_holders);
634 lockres->l_ro_holders--;
642 /* WARNING: This function lives in a world where the only three lock
643 * levels are EX, PR, and NL. It *will* have to be adjusted when more
644 * lock types are added. */
645 static inline int ocfs2_highest_compat_lock_level(int level)
647 int new_level = LKM_EXMODE;
649 if (level == LKM_EXMODE)
650 new_level = LKM_NLMODE;
651 else if (level == LKM_PRMODE)
652 new_level = LKM_PRMODE;
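/*
 * Summary of the mapping implemented above: a lock held at EX forces
 * other nodes down to NL, PR is compatible with other PR holders, and
 * anything else (NL) is compatible with up to EX:
 *
 *     level held     highest level others may hold
 *     LKM_EXMODE     LKM_NLMODE
 *     LKM_PRMODE     LKM_PRMODE
 *     LKM_NLMODE     LKM_EXMODE
 */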
656 static void lockres_set_flags(struct ocfs2_lock_res *lockres,
657 unsigned long newflags)
659 struct ocfs2_mask_waiter *mw, *tmp;
661 assert_spin_locked(&lockres->l_lock);
663 lockres->l_flags = newflags;
665 list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
666 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
669 list_del_init(&mw->mw_item);
671 complete(&mw->mw_complete);
674 static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
676 lockres_set_flags(lockres, lockres->l_flags | or);
678 static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
681 lockres_set_flags(lockres, lockres->l_flags & ~clear);
684 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
688 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
689 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
690 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
691 BUG_ON(lockres->l_blocking <= LKM_NLMODE);
693 lockres->l_level = lockres->l_requested;
694 if (lockres->l_level <=
695 ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
696 lockres->l_blocking = LKM_NLMODE;
697 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
699 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
704 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
708 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
709 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
711 /* Convert from RO to EX doesn't really need anything as our
712 * information is already up to date. Convert from NL to
713 * *anything* however should mark ourselves as needing an
715 if (lockres->l_level == LKM_NLMODE &&
716 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
717 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
719 lockres->l_level = lockres->l_requested;
720 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
725 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
729 BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
730 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
732 if (lockres->l_requested > LKM_NLMODE &&
733 !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
734 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
735 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
737 lockres->l_level = lockres->l_requested;
738 lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
739 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
744 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
747 int needs_downconvert = 0;
750 assert_spin_locked(&lockres->l_lock);
752 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
754 if (level > lockres->l_blocking) {
755 /* only schedule a downconvert if we haven't already scheduled
756 * one that goes low enough to satisfy the level we're
757 * blocking. this also catches the case where we get
759 if (ocfs2_highest_compat_lock_level(level) <
760 ocfs2_highest_compat_lock_level(lockres->l_blocking))
761 needs_downconvert = 1;
763 lockres->l_blocking = level;
766 mlog_exit(needs_downconvert);
767 return needs_downconvert;
770 static void ocfs2_blocking_ast(void *opaque, int level)
772 struct ocfs2_lock_res *lockres = opaque;
773 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
774 int needs_downconvert;
777 BUG_ON(level <= LKM_NLMODE);
779 mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
780 lockres->l_name, level, lockres->l_level,
781 ocfs2_lock_type_string(lockres->l_type));
784 * We can skip the bast for locks which don't enable caching -
785 * they'll be dropped at the earliest possible time anyway.
787 if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
790 spin_lock_irqsave(&lockres->l_lock, flags);
791 needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
792 if (needs_downconvert)
793 ocfs2_schedule_blocked_lock(osb, lockres);
794 spin_unlock_irqrestore(&lockres->l_lock, flags);
796 wake_up(&lockres->l_event);
798 ocfs2_wake_downconvert_thread(osb);
801 static void ocfs2_locking_ast(void *opaque)
803 struct ocfs2_lock_res *lockres = opaque;
804 struct dlm_lockstatus *lksb = &lockres->l_lksb;
807 spin_lock_irqsave(&lockres->l_lock, flags);
809 if (lksb->status != DLM_NORMAL) {
810 mlog(ML_ERROR, "lockres %s: lksb status value of %u!\n",
811 lockres->l_name, lksb->status);
812 spin_unlock_irqrestore(&lockres->l_lock, flags);
816 switch(lockres->l_action) {
817 case OCFS2_AST_ATTACH:
818 ocfs2_generic_handle_attach_action(lockres);
819 lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
821 case OCFS2_AST_CONVERT:
822 ocfs2_generic_handle_convert_action(lockres);
824 case OCFS2_AST_DOWNCONVERT:
825 ocfs2_generic_handle_downconvert_action(lockres);
828 mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
829 "lockres flags = 0x%lx, unlock action: %u\n",
830 lockres->l_name, lockres->l_action, lockres->l_flags,
831 lockres->l_unlock_action);
835 /* set it to something invalid so if we get called again we
837 lockres->l_action = OCFS2_AST_INVALID;
839 wake_up(&lockres->l_event);
840 spin_unlock_irqrestore(&lockres->l_lock, flags);
843 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
849 spin_lock_irqsave(&lockres->l_lock, flags);
850 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
852 lockres->l_action = OCFS2_AST_INVALID;
854 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
855 spin_unlock_irqrestore(&lockres->l_lock, flags);
857 wake_up(&lockres->l_event);
861 /* Note: If we detect another process working on the lock (i.e.,
862 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
863 * to do the right thing in that case.
865 static int ocfs2_lock_create(struct ocfs2_super *osb,
866 struct ocfs2_lock_res *lockres,
871 enum dlm_status status = DLM_NORMAL;
876 mlog(0, "lock %s, level = %d, flags = %d\n", lockres->l_name, level,
879 spin_lock_irqsave(&lockres->l_lock, flags);
880 if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
881 (lockres->l_flags & OCFS2_LOCK_BUSY)) {
882 spin_unlock_irqrestore(&lockres->l_lock, flags);
886 lockres->l_action = OCFS2_AST_ATTACH;
887 lockres->l_requested = level;
888 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
889 spin_unlock_irqrestore(&lockres->l_lock, flags);
891 status = dlmlock(osb->dlm,
896 OCFS2_LOCK_ID_MAX_LEN - 1,
900 if (status != DLM_NORMAL) {
901 ocfs2_log_dlm_error("dlmlock", status, lockres);
903 ocfs2_recover_from_dlm_error(lockres, 1);
906 mlog(0, "lock %s, successfull return from dlmlock\n", lockres->l_name);
913 static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
919 spin_lock_irqsave(&lockres->l_lock, flags);
920 ret = lockres->l_flags & flag;
921 spin_unlock_irqrestore(&lockres->l_lock, flags);
926 static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
929 wait_event(lockres->l_event,
930 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
933 static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
936 wait_event(lockres->l_event,
937 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
940 /* predict what lock level we'll be dropping down to on behalf
941 * of another node, and return true if the currently wanted
942 * level will be compatible with it. */
943 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
946 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
948 return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
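/*
 * Example (following the table in ocfs2_highest_compat_lock_level): if
 * another node is blocked wanting PR (l_blocking == LKM_PRMODE), we will
 * eventually drop to PR, so a local request for LKM_PRMODE may continue
 * while a request for LKM_EXMODE must wait for the downconvert.
 */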
951 static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
953 INIT_LIST_HEAD(&mw->mw_item);
954 init_completion(&mw->mw_complete);
957 static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
959 wait_for_completion(&mw->mw_complete);
960 /* Re-arm the completion in case we want to wait on it again */
961 INIT_COMPLETION(mw->mw_complete);
962 return mw->mw_status;
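/*
 * Typical mask waiter pattern, assembled from the callers further down
 * (illustrative sketch only): wait until the BUSY flag clears.
 *
 *     struct ocfs2_mask_waiter mw;
 *
 *     ocfs2_init_mask_waiter(&mw);
 *     spin_lock_irqsave(&lockres->l_lock, flags);
 *     lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
 *     spin_unlock_irqrestore(&lockres->l_lock, flags);
 *     ret = ocfs2_wait_for_mask(&mw);
 *
 * The completion fires once (l_flags & OCFS2_LOCK_BUSY) == 0, because
 * lockres_set_flags() compares the masked flags against the goal (0).
 */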
965 static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
966 struct ocfs2_mask_waiter *mw,
970 BUG_ON(!list_empty(&mw->mw_item));
972 assert_spin_locked(&lockres->l_lock);
974 list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
979 /* returns 0 if the mw that was removed was already satisfied, -EBUSY
980 * if the mask still hadn't reached its goal */
981 static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
982 struct ocfs2_mask_waiter *mw)
987 spin_lock_irqsave(&lockres->l_lock, flags);
988 if (!list_empty(&mw->mw_item)) {
989 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
992 list_del_init(&mw->mw_item);
993 init_completion(&mw->mw_complete);
995 spin_unlock_irqrestore(&lockres->l_lock, flags);
1001 static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
1002 struct ocfs2_lock_res *lockres)
1006 ret = wait_for_completion_interruptible(&mw->mw_complete);
1008 lockres_remove_mask_waiter(lockres, mw);
1010 ret = mw->mw_status;
1011 /* Re-arm the completion in case we want to wait on it again */
1012 INIT_COMPLETION(mw->mw_complete);
1016 static int ocfs2_cluster_lock(struct ocfs2_super *osb,
1017 struct ocfs2_lock_res *lockres,
1022 struct ocfs2_mask_waiter mw;
1023 enum dlm_status status;
1024 int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
1025 int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
1026 unsigned long flags;
1030 ocfs2_init_mask_waiter(&mw);
1032 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
1033 lkm_flags |= LKM_VALBLK;
1038 if (catch_signals && signal_pending(current)) {
1043 spin_lock_irqsave(&lockres->l_lock, flags);
1045 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
1046 "Cluster lock called on freeing lockres %s! flags "
1047 "0x%lx\n", lockres->l_name, lockres->l_flags);
1049 /* We only compare against the currently granted level
1050 * here. If the lock is blocked waiting on a downconvert,
1051 * we'll get caught below. */
1052 if (lockres->l_flags & OCFS2_LOCK_BUSY &&
1053 level > lockres->l_level) {
1054 /* is someone sitting in dlm_lock? If so, wait on
1056 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1061 if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
1062 !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
1063 /* is the lock currently blocked on behalf of
1065 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
1070 if (level > lockres->l_level) {
1071 if (lockres->l_action != OCFS2_AST_INVALID)
1072 mlog(ML_ERROR, "lockres %s has action %u pending\n",
1073 lockres->l_name, lockres->l_action);
1075 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1076 lockres->l_action = OCFS2_AST_ATTACH;
1077 lkm_flags &= ~LKM_CONVERT;
1079 lockres->l_action = OCFS2_AST_CONVERT;
1080 lkm_flags |= LKM_CONVERT;
1083 lockres->l_requested = level;
1084 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1085 spin_unlock_irqrestore(&lockres->l_lock, flags);
1087 BUG_ON(level == LKM_IVMODE);
1088 BUG_ON(level == LKM_NLMODE);
1090 mlog(0, "lock %s, convert from %d to level = %d\n",
1091 lockres->l_name, lockres->l_level, level);
1093 /* call dlmlock to upgrade the lock now */
1094 status = dlmlock(osb->dlm,
1099 OCFS2_LOCK_ID_MAX_LEN - 1,
1102 ocfs2_blocking_ast);
1103 if (status != DLM_NORMAL) {
1104 if ((lkm_flags & LKM_NOQUEUE) &&
1105 (status == DLM_NOTQUEUED))
1108 ocfs2_log_dlm_error("dlmlock", status,
1112 ocfs2_recover_from_dlm_error(lockres, 1);
1116 mlog(0, "lock %s, successfull return from dlmlock\n",
1119 /* At this point we've gone inside the dlm and need to
1120 * complete our work regardless. */
1123 /* wait for busy to clear and carry on */
1127 /* Ok, if we get here then we're good to go. */
1128 ocfs2_inc_holders(lockres, level);
1132 spin_unlock_irqrestore(&lockres->l_lock, flags);
1135 * This is helping work around a lock inversion between the page lock
1136 * and dlm locks. One path holds the page lock while calling aops
1137 * which block acquiring dlm locks. The downconvert thread holds dlm
1138 * locks while acquiring page locks while downconverting data locks.
1139 * This block is helping an aop path notice the inversion and back
1140 * off to unlock its page lock before trying the dlm lock again.
1142 if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
1143 mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
1145 if (lockres_remove_mask_waiter(lockres, &mw))
1151 ret = ocfs2_wait_for_mask(&mw);
1161 static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
1162 struct ocfs2_lock_res *lockres,
1165 unsigned long flags;
1168 spin_lock_irqsave(&lockres->l_lock, flags);
1169 ocfs2_dec_holders(lockres, level);
1170 ocfs2_downconvert_on_unlock(osb, lockres);
1171 spin_unlock_irqrestore(&lockres->l_lock, flags);
1175 static int ocfs2_create_new_lock(struct ocfs2_super *osb,
1176 struct ocfs2_lock_res *lockres,
1180 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1181 unsigned long flags;
1182 int lkm_flags = local ? LKM_LOCAL : 0;
1184 spin_lock_irqsave(&lockres->l_lock, flags);
1185 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
1186 lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
1187 spin_unlock_irqrestore(&lockres->l_lock, flags);
1189 return ocfs2_lock_create(osb, lockres, level, lkm_flags);
1192 /* Grants us an EX lock on the data and metadata resources, skipping
1193 * the normal cluster directory lookup. Use this ONLY on newly created
1194 * inodes which other nodes can't possibly see, and which haven't been
1195 * hashed in the inode hash yet. This can give us a good performance
1196 * increase as it'll skip the network broadcast normally associated
1197 * with creating a new lock resource. */
1198 int ocfs2_create_new_inode_locks(struct inode *inode)
1201 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1204 BUG_ON(!ocfs2_inode_is_new(inode));
1208 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1210 /* NOTE: We don't increment any of the holder counts, nor
1211 * do we add anything to a journal handle. Since this is
1212 * supposed to be a new inode which the cluster doesn't know
1213 * about yet, there is no need to. As far as the LVB handling
1214 * is concerned, this is basically like acquiring an EX lock
1215 * on a resource which has an invalid one -- we'll set it
1216 * valid when we release the EX. */
1218 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
1225 * We don't want to use LKM_LOCAL on a meta data lock as they
1226 * don't use a generation in their lock names.
1228 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
1234 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
1245 int ocfs2_rw_lock(struct inode *inode, int write)
1248 struct ocfs2_lock_res *lockres;
1249 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1255 mlog(0, "inode %llu take %s RW lock\n",
1256 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1257 write ? "EXMODE" : "PRMODE");
1259 if (ocfs2_mount_local(osb))
1262 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1264 level = write ? LKM_EXMODE : LKM_PRMODE;
1266 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
1275 void ocfs2_rw_unlock(struct inode *inode, int write)
1277 int level = write ? LKM_EXMODE : LKM_PRMODE;
1278 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1279 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1283 mlog(0, "inode %llu drop %s RW lock\n",
1284 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1285 write ? "EXMODE" : "PRMODE");
1287 if (!ocfs2_mount_local(osb))
1288 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1294 * ocfs2_open_lock always gets a PR mode lock.
1296 int ocfs2_open_lock(struct inode *inode)
1299 struct ocfs2_lock_res *lockres;
1300 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1306 mlog(0, "inode %llu take PRMODE open lock\n",
1307 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1309 if (ocfs2_mount_local(osb))
1312 lockres = &OCFS2_I(inode)->ip_open_lockres;
1314 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1324 int ocfs2_try_open_lock(struct inode *inode, int write)
1326 int status = 0, level;
1327 struct ocfs2_lock_res *lockres;
1328 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1334 mlog(0, "inode %llu try to take %s open lock\n",
1335 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1336 write ? "EXMODE" : "PRMODE");
1338 if (ocfs2_mount_local(osb))
1341 lockres = &OCFS2_I(inode)->ip_open_lockres;
1343 level = write ? LKM_EXMODE : LKM_PRMODE;
1346 * The file system may already be holding a PRMODE/EXMODE open lock.
1347 * Since we pass LKM_NOQUEUE, the request won't block waiting on
1348 * other nodes and the -EAGAIN will indicate to the caller that
1349 * this inode is still in use.
1351 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1352 level, LKM_NOQUEUE, 0);
1360 * ocfs2_open_unlock unlocks PR and EX mode open locks.
1362 void ocfs2_open_unlock(struct inode *inode)
1364 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
1365 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1369 mlog(0, "inode %llu drop open lock\n",
1370 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1372 if (ocfs2_mount_local(osb))
1375 if(lockres->l_ro_holders)
1376 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1378 if(lockres->l_ex_holders)
1379 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1386 static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
1390 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1391 unsigned long flags;
1392 struct ocfs2_mask_waiter mw;
1394 ocfs2_init_mask_waiter(&mw);
1397 spin_lock_irqsave(&lockres->l_lock, flags);
1398 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
1399 ret = ocfs2_prepare_cancel_convert(osb, lockres);
1401 spin_unlock_irqrestore(&lockres->l_lock, flags);
1402 ret = ocfs2_cancel_convert(osb, lockres);
1409 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1410 spin_unlock_irqrestore(&lockres->l_lock, flags);
1412 ocfs2_wait_for_mask(&mw);
1418 * We may still have gotten the lock, in which case there's no
1419 * point to restarting the syscall.
1421 if (lockres->l_level == level)
1424 mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
1425 lockres->l_flags, lockres->l_level, lockres->l_action);
1427 spin_unlock_irqrestore(&lockres->l_lock, flags);
1434 * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
1435 * flock() calls. The locking approach this requires is sufficiently
1436 * different from all other cluster lock types that we implement a
1437 * separate path to the "low-level" dlm calls. In particular:
1439 * - No optimization of lock levels is done - we take exactly
1440 * what's been requested.
1442 * - No lock caching is employed. We immediately downconvert to
1443 * no-lock at unlock time. This also means flock locks never go on
1444 * the blocking list.
1446 * - Since userspace can trivially deadlock itself with flock, we make
1447 * sure to allow cancellation of a misbehaving application's flock()
1450 * - Access to any flock lockres doesn't require concurrency, so we
1451 * can simplify the code by requiring the caller to guarantee
1452 * serialization of dlmglue flock calls.
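/*
 * Sketch of the expected caller pattern (illustrative only; the real
 * callers are ocfs2's flock() handlers):
 *
 *     ret = ocfs2_file_lock(file, ex, trylock);
 *     if (!ret) {
 *             ... critical section ...
 *             ocfs2_file_unlock(file);
 *     }
 *
 * A failed trylock is expected to surface as -EAGAIN (an assumption
 * based on the DLM_NOTQUEUED handling below).
 */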
1454 int ocfs2_file_lock(struct file *file, int ex, int trylock)
1456 int ret, level = ex ? LKM_EXMODE : LKM_PRMODE;
1457 unsigned int lkm_flags = trylock ? LKM_NOQUEUE : 0;
1458 unsigned long flags;
1459 struct ocfs2_file_private *fp = file->private_data;
1460 struct ocfs2_lock_res *lockres = &fp->fp_flock;
1461 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1462 struct ocfs2_mask_waiter mw;
1464 ocfs2_init_mask_waiter(&mw);
1466 if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
1467 (lockres->l_level > LKM_NLMODE)) {
1469 "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
1470 "level: %u\n", lockres->l_name, lockres->l_flags,
1475 spin_lock_irqsave(&lockres->l_lock, flags);
1476 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1477 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1478 spin_unlock_irqrestore(&lockres->l_lock, flags);
1481 * Get the lock at NLMODE to start - that way we
1482 * can cancel the upconvert request if need be.
1484 ret = ocfs2_lock_create(osb, lockres, LKM_NLMODE, 0);
1490 ret = ocfs2_wait_for_mask(&mw);
1495 spin_lock_irqsave(&lockres->l_lock, flags);
1498 lockres->l_action = OCFS2_AST_CONVERT;
1499 lkm_flags |= LKM_CONVERT;
1500 lockres->l_requested = level;
1501 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1503 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1504 spin_unlock_irqrestore(&lockres->l_lock, flags);
1506 ret = dlmlock(osb->dlm, level, &lockres->l_lksb, lkm_flags,
1507 lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1,
1508 ocfs2_locking_ast, lockres, ocfs2_blocking_ast);
1509 if (ret != DLM_NORMAL) {
1510 if (trylock && ret == DLM_NOTQUEUED)
1513 ocfs2_log_dlm_error("dlmlock", ret, lockres);
1517 ocfs2_recover_from_dlm_error(lockres, 1);
1518 lockres_remove_mask_waiter(lockres, &mw);
1522 ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
1523 if (ret == -ERESTARTSYS) {
1525 * Userspace can cause deadlock itself with
1526 * flock(). Current behavior locally is to allow the
1527 * deadlock, but abort the system call if a signal is
1528 * received. We follow this example, otherwise a
1529 * poorly written program could sit in kernel until
1532 * Handling this is a bit more complicated for Ocfs2
1533 * though. We can't exit this function with an
1534 * outstanding lock request, so a cancel convert is
1535 * required. We intentionally overwrite 'ret' - if the
1536 * cancel fails and the lock was granted, it's easier
1537 * to just bubble success back up to the user.
1539 ret = ocfs2_flock_handle_signal(lockres, level);
1544 mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
1545 lockres->l_name, ex, trylock, ret);
1549 void ocfs2_file_unlock(struct file *file)
1552 unsigned long flags;
1553 struct ocfs2_file_private *fp = file->private_data;
1554 struct ocfs2_lock_res *lockres = &fp->fp_flock;
1555 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1556 struct ocfs2_mask_waiter mw;
1558 ocfs2_init_mask_waiter(&mw);
1560 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
1563 if (lockres->l_level == LKM_NLMODE)
1566 mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
1567 lockres->l_name, lockres->l_flags, lockres->l_level,
1570 spin_lock_irqsave(&lockres->l_lock, flags);
1572 * Fake a blocking ast for the downconvert code.
1574 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
1575 lockres->l_blocking = LKM_EXMODE;
1577 ocfs2_prepare_downconvert(lockres, LKM_NLMODE);
1578 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1579 spin_unlock_irqrestore(&lockres->l_lock, flags);
1581 ret = ocfs2_downconvert_lock(osb, lockres, LKM_NLMODE, 0);
1587 ret = ocfs2_wait_for_mask(&mw);
1592 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
1593 struct ocfs2_lock_res *lockres)
1599 /* If we know that another node is waiting on our lock, kick
1600 * the downconvert thread pre-emptively when we reach a release
1602 if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
1603 switch(lockres->l_blocking) {
1605 if (!lockres->l_ex_holders && !lockres->l_ro_holders)
1609 if (!lockres->l_ex_holders)
1618 ocfs2_wake_downconvert_thread(osb);
1623 #define OCFS2_SEC_BITS 34
1624 #define OCFS2_SEC_SHIFT (64 - 34)
1625 #define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
1627 /* LVB only has room for 64 bits of time here so we pack it for
1629 static u64 ocfs2_pack_timespec(struct timespec *spec)
1632 u64 sec = spec->tv_sec;
1633 u32 nsec = spec->tv_nsec;
1635 res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
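/*
 * Worked example of the packing above: with OCFS2_SEC_SHIFT == 30 the
 * seconds occupy the top 34 bits and the nanoseconds the low 30 bits,
 * so tv_sec == 1, tv_nsec == 1 packs to (1 << 30) | 1 == 0x40000001.
 * ocfs2_unpack_timespec() below reverses this exactly.
 */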
1640 /* Call this with the lockres locked. I am reasonably sure we don't
1641 * need ip_lock in this function as anyone who would be changing those
1642 * values is supposed to be blocked in ocfs2_inode_lock right now. */
1643 static void __ocfs2_stuff_meta_lvb(struct inode *inode)
1645 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1646 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
1647 struct ocfs2_meta_lvb *lvb;
1651 lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
1654 * Invalidate the LVB of a deleted inode - this way other
1655 * nodes are forced to go to disk and discover the new inode
1658 if (oi->ip_flags & OCFS2_INODE_DELETED) {
1659 lvb->lvb_version = 0;
1663 lvb->lvb_version = OCFS2_LVB_VERSION;
1664 lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
1665 lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
1666 lvb->lvb_iuid = cpu_to_be32(inode->i_uid);
1667 lvb->lvb_igid = cpu_to_be32(inode->i_gid);
1668 lvb->lvb_imode = cpu_to_be16(inode->i_mode);
1669 lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
1670 lvb->lvb_iatime_packed =
1671 cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
1672 lvb->lvb_ictime_packed =
1673 cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
1674 lvb->lvb_imtime_packed =
1675 cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
1676 lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
1677 lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
1678 lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
1681 mlog_meta_lvb(0, lockres);
1686 static void ocfs2_unpack_timespec(struct timespec *spec,
1689 spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
1690 spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
1693 static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
1695 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1696 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
1697 struct ocfs2_meta_lvb *lvb;
1701 mlog_meta_lvb(0, lockres);
1703 lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
1705 /* We're safe here without the lockres lock... */
1706 spin_lock(&oi->ip_lock);
1707 oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
1708 i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
1710 oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
1711 oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
1712 ocfs2_set_inode_flags(inode);
1714 /* fast-symlinks are a special case */
1715 if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
1716 inode->i_blocks = 0;
1718 inode->i_blocks = ocfs2_inode_sector_count(inode);
1720 inode->i_uid = be32_to_cpu(lvb->lvb_iuid);
1721 inode->i_gid = be32_to_cpu(lvb->lvb_igid);
1722 inode->i_mode = be16_to_cpu(lvb->lvb_imode);
1723 inode->i_nlink = be16_to_cpu(lvb->lvb_inlink);
1724 ocfs2_unpack_timespec(&inode->i_atime,
1725 be64_to_cpu(lvb->lvb_iatime_packed));
1726 ocfs2_unpack_timespec(&inode->i_mtime,
1727 be64_to_cpu(lvb->lvb_imtime_packed));
1728 ocfs2_unpack_timespec(&inode->i_ctime,
1729 be64_to_cpu(lvb->lvb_ictime_packed));
1730 spin_unlock(&oi->ip_lock);
1735 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
1736 struct ocfs2_lock_res *lockres)
1738 struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
1740 if (lvb->lvb_version == OCFS2_LVB_VERSION
1741 && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
1746 /* Determine whether a lock resource needs to be refreshed, and
1747 * arbitrate who gets to refresh it.
1749 * 0 means no refresh needed.
1751 * > 0 means you need to refresh this and you MUST call
1752 * ocfs2_complete_lock_res_refresh afterwards. */
1753 static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
1755 unsigned long flags;
1761 spin_lock_irqsave(&lockres->l_lock, flags);
1762 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
1763 spin_unlock_irqrestore(&lockres->l_lock, flags);
1767 if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
1768 spin_unlock_irqrestore(&lockres->l_lock, flags);
1770 ocfs2_wait_on_refreshing_lock(lockres);
1774 /* Ok, I'll be the one to refresh this lock. */
1775 lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
1776 spin_unlock_irqrestore(&lockres->l_lock, flags);
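/*
 * Sketch of the refresh arbitration pattern (illustrative, modelled on
 * the ocfs2_super_lock() and ocfs2_inode_lock_update() callers below):
 *
 *     status = ocfs2_should_refresh_lock_res(lockres);
 *     if (status) {
 *             status = ...re-read whatever this lock protects...;
 *             ocfs2_complete_lock_res_refresh(lockres, status);
 *     }
 *
 * Only one caller wins the OCFS2_LOCK_REFRESHING flag; everyone else
 * blocks in ocfs2_wait_on_refreshing_lock() and then rechecks.
 */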
1784 /* If status is non-zero, I'll mark it as not being in refresh
1785 * anymore, but I won't clear the needs refresh flag. */
1786 static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
1789 unsigned long flags;
1792 spin_lock_irqsave(&lockres->l_lock, flags);
1793 lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
1795 lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
1796 spin_unlock_irqrestore(&lockres->l_lock, flags);
1798 wake_up(&lockres->l_event);
1803 /* may or may not return a bh if it went to disk. */
1804 static int ocfs2_inode_lock_update(struct inode *inode,
1805 struct buffer_head **bh)
1808 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1809 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
1810 struct ocfs2_dinode *fe;
1811 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1815 if (ocfs2_mount_local(osb))
1818 spin_lock(&oi->ip_lock);
1819 if (oi->ip_flags & OCFS2_INODE_DELETED) {
1820 mlog(0, "Orphaned inode %llu was deleted while we "
1821 "were waiting on a lock. ip_flags = 0x%x\n",
1822 (unsigned long long)oi->ip_blkno, oi->ip_flags);
1823 spin_unlock(&oi->ip_lock);
1827 spin_unlock(&oi->ip_lock);
1829 if (!ocfs2_should_refresh_lock_res(lockres))
1832 /* This will discard any caching information we might have had
1833 * for the inode metadata. */
1834 ocfs2_metadata_cache_purge(inode);
1836 ocfs2_extent_map_trunc(inode, 0);
1838 if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
1839 mlog(0, "Trusting LVB on inode %llu\n",
1840 (unsigned long long)oi->ip_blkno);
1841 ocfs2_refresh_inode_from_lvb(inode);
1843 /* Boo, we have to go to disk. */
1844 /* read bh, cast, ocfs2_refresh_inode */
1845 status = ocfs2_read_block(OCFS2_SB(inode->i_sb), oi->ip_blkno,
1846 bh, OCFS2_BH_CACHED, inode);
1851 fe = (struct ocfs2_dinode *) (*bh)->b_data;
1853 /* This is a good chance to make sure we're not
1854 * locking an invalid object.
1856 * We bug on a stale inode here because we checked
1857 * above whether it was wiped from disk. The wiping
1858 * node provides a guarantee that we receive that
1859 * message and can mark the inode before dropping any
1860 * locks associated with it. */
1861 if (!OCFS2_IS_VALID_DINODE(fe)) {
1862 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
1866 mlog_bug_on_msg(inode->i_generation !=
1867 le32_to_cpu(fe->i_generation),
1868 "Invalid dinode %llu disk generation: %u "
1869 "inode->i_generation: %u\n",
1870 (unsigned long long)oi->ip_blkno,
1871 le32_to_cpu(fe->i_generation),
1872 inode->i_generation);
1873 mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
1874 !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
1875 "Stale dinode %llu dtime: %llu flags: 0x%x\n",
1876 (unsigned long long)oi->ip_blkno,
1877 (unsigned long long)le64_to_cpu(fe->i_dtime),
1878 le32_to_cpu(fe->i_flags));
1880 ocfs2_refresh_inode(inode, fe);
1885 ocfs2_complete_lock_res_refresh(lockres, status);
1891 static int ocfs2_assign_bh(struct inode *inode,
1892 struct buffer_head **ret_bh,
1893 struct buffer_head *passed_bh)
1898 /* Ok, the update went to disk for us, use the
1900 *ret_bh = passed_bh;
1906 status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
1907 OCFS2_I(inode)->ip_blkno,
1918 * returns < 0 error if the callback will never be called, otherwise
1919 * the result of the lock will be communicated via the callback.
1921 int ocfs2_inode_lock_full(struct inode *inode,
1922 struct buffer_head **ret_bh,
1926 int status, level, dlm_flags, acquired;
1927 struct ocfs2_lock_res *lockres = NULL;
1928 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1929 struct buffer_head *local_bh = NULL;
1935 mlog(0, "inode %llu, take %s META lock\n",
1936 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1937 ex ? "EXMODE" : "PRMODE");
1941 /* We'll allow faking a readonly metadata lock for
1943 if (ocfs2_is_hard_readonly(osb)) {
1949 if (ocfs2_mount_local(osb))
1952 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
1953 ocfs2_wait_for_recovery(osb);
1955 lockres = &OCFS2_I(inode)->ip_inode_lockres;
1956 level = ex ? LKM_EXMODE : LKM_PRMODE;
1958 if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
1959 dlm_flags |= LKM_NOQUEUE;
1961 status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, arg_flags);
1963 if (status != -EAGAIN && status != -EIOCBRETRY)
1968 /* Notify the error cleanup path to drop the cluster lock. */
1971 /* We wait twice because a node may have died while we were in
1972 * the lower dlm layers. The second time though, we've
1973 * committed to owning this lock so we don't allow signals to
1974 * abort the operation. */
1975 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
1976 ocfs2_wait_for_recovery(osb);
1980 * We only see this flag if we're being called from
1981 * ocfs2_read_locked_inode(). It means we're locking an inode
1982 * which hasn't been populated yet, so clear the refresh flag
1983 * and let the caller handle it.
1985 if (inode->i_state & I_NEW) {
1988 ocfs2_complete_lock_res_refresh(lockres, 0);
1992 /* This is fun. The caller may want a bh back, or it may
1993 * not. ocfs2_inode_lock_update definitely wants one in, but
1994 * may or may not read one, depending on what's in the
1995 * LVB. The result of all of this is that we've *only* gone to
1996 * disk if we have to, so the complexity is worthwhile. */
1997 status = ocfs2_inode_lock_update(inode, &local_bh);
1999 if (status != -ENOENT)
2005 status = ocfs2_assign_bh(inode, ret_bh, local_bh);
2014 if (ret_bh && (*ret_bh)) {
2019 ocfs2_inode_unlock(inode, ex);
2030 * This is working around a lock inversion between tasks acquiring DLM
2031 * locks while holding a page lock and the downconvert thread which
2032 * blocks dlm lock acquiry while acquiring page locks.
2034 * ** These _with_page variants are only intended to be called from aop
2035 * methods that hold page locks and return a very specific *positive* error
2036 * code that aop methods pass up to the VFS -- test for errors with != 0. **
2038 * The DLM is called such that it returns -EAGAIN if it would have
2039 * blocked waiting for the downconvert thread. In that case we unlock
2040 * our page so the downconvert thread can make progress. Once we've
2041 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
2042 * that called us can bubble that back up into the VFS who will then
2043 * immediately retry the aop call.
2045 * We do a blocking lock and immediate unlock before returning, though, so that
2046 * the lock has a great chance of being cached on this node by the time the VFS
2047 * calls back to retry the aop. This has a potential to livelock as nodes
2048 * ping locks back and forth, but that's a risk we're willing to take in order
2049 * to keep the fix for the lock inversion simple.
2051 int ocfs2_inode_lock_with_page(struct inode *inode,
2052 struct buffer_head **ret_bh,
2058 ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
2059 if (ret == -EAGAIN) {
2061 if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
2062 ocfs2_inode_unlock(inode, ex);
2063 ret = AOP_TRUNCATED_PAGE;
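/*
 * Sketch of the aop-side usage (illustrative only; the trailing page
 * argument is assumed from the description above, and error handling is
 * trimmed). When AOP_TRUNCATED_PAGE comes back, the page lock has
 * already been dropped on this path, so the aop must not touch the page
 * again and simply propagates the code for the VFS to retry:
 *
 *     ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
 *     if (ret == AOP_TRUNCATED_PAGE)
 *             return ret;
 */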
2069 int ocfs2_inode_lock_atime(struct inode *inode,
2070 struct vfsmount *vfsmnt,
2076 ret = ocfs2_inode_lock(inode, NULL, 0);
2083 * If we should update atime, we will get the EX lock,
2084 * otherwise we just get the PR lock.
2086 if (ocfs2_should_update_atime(inode, vfsmnt)) {
2087 struct buffer_head *bh = NULL;
2089 ocfs2_inode_unlock(inode, 0);
2090 ret = ocfs2_inode_lock(inode, &bh, 1);
2096 if (ocfs2_should_update_atime(inode, vfsmnt))
2097 ocfs2_update_inode_atime(inode, bh);
2107 void ocfs2_inode_unlock(struct inode *inode,
2110 int level = ex ? LKM_EXMODE : LKM_PRMODE;
2111 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
2112 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2116 mlog(0, "inode %llu drop %s META lock\n",
2117 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2118 ex ? "EXMODE" : "PRMODE");
2120 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
2121 !ocfs2_mount_local(osb))
2122 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
2127 int ocfs2_super_lock(struct ocfs2_super *osb,
2131 int level = ex ? LKM_EXMODE : LKM_PRMODE;
2132 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2136 if (ocfs2_is_hard_readonly(osb))
2139 if (ocfs2_mount_local(osb))
2142 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
2148 /* The super block lock path is really in the best position to
2149 * know when resources covered by the lock need to be
2150 * refreshed, so we do it here. Of course, making sense of
2151 * everything is up to the caller :) */
2152 status = ocfs2_should_refresh_lock_res(lockres);
2158 status = ocfs2_refresh_slot_info(osb);
2160 ocfs2_complete_lock_res_refresh(lockres, status);
2170 void ocfs2_super_unlock(struct ocfs2_super *osb,
2173 int level = ex ? LKM_EXMODE : LKM_PRMODE;
2174 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2176 if (!ocfs2_mount_local(osb))
2177 ocfs2_cluster_unlock(osb, lockres, level);
2180 int ocfs2_rename_lock(struct ocfs2_super *osb)
2183 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2185 if (ocfs2_is_hard_readonly(osb))
2188 if (ocfs2_mount_local(osb))
2191 status = ocfs2_cluster_lock(osb, lockres, LKM_EXMODE, 0, 0);
2198 void ocfs2_rename_unlock(struct ocfs2_super *osb)
2200 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2202 if (!ocfs2_mount_local(osb))
2203 ocfs2_cluster_unlock(osb, lockres, LKM_EXMODE);
2206 int ocfs2_dentry_lock(struct dentry *dentry, int ex)
2209 int level = ex ? LKM_EXMODE : LKM_PRMODE;
2210 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2211 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2215 if (ocfs2_is_hard_readonly(osb))
2218 if (ocfs2_mount_local(osb))
2221 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
2228 void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
2230 int level = ex ? LKM_EXMODE : LKM_PRMODE;
2231 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2232 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2234 if (!ocfs2_mount_local(osb))
2235 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
2238 /* Reference counting of the dlm debug structure. We want this because
2239 * open references on the debug inodes can live on after an unmount, so
2240 * we can't rely on the ocfs2_super to always exist. */
2241 static void ocfs2_dlm_debug_free(struct kref *kref)
2243 struct ocfs2_dlm_debug *dlm_debug;
2245 dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
2250 void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
2253 kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
2256 static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
2258 kref_get(&debug->d_refcnt);
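/*
 * Reference lifecycle: kref_init() in ocfs2_new_dlm_debug() sets up the
 * creator's reference; ocfs2_dlm_init_debug() and every open of the
 * debugfs file take additional ones via ocfs2_get_dlm_debug(), which
 * are dropped again in ocfs2_dlm_shutdown_debug() and
 * ocfs2_dlm_debug_release(). The structure is freed from
 * ocfs2_dlm_debug_free() when the last reference goes away.
 */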
2261 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
2263 struct ocfs2_dlm_debug *dlm_debug;
2265 dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
2267 mlog_errno(-ENOMEM);
2271 kref_init(&dlm_debug->d_refcnt);
2272 INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
2273 dlm_debug->d_locking_state = NULL;
2278 /* Access to this is arbitrated for us via seq_file->sem. */
2279 struct ocfs2_dlm_seq_priv {
2280 struct ocfs2_dlm_debug *p_dlm_debug;
2281 struct ocfs2_lock_res p_iter_res;
2282 struct ocfs2_lock_res p_tmp_res;
2285 static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
2286 struct ocfs2_dlm_seq_priv *priv)
2288 struct ocfs2_lock_res *iter, *ret = NULL;
2289 struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
2291 assert_spin_locked(&ocfs2_dlm_tracking_lock);
2293 list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
2294 /* discover the head of the list */
2295 if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
2296 mlog(0, "End of list found, %p\n", ret);
2300 /* We track our "dummy" iteration lockres' by a NULL l_ops field. */
2302 if (iter->l_ops != NULL) {
2311 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
2313 struct ocfs2_dlm_seq_priv *priv = m->private;
2314 struct ocfs2_lock_res *iter;
2316 spin_lock(&ocfs2_dlm_tracking_lock);
2317 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
2319 /* Since lockres' have the lifetime of their container
2320 * (which can be inodes, ocfs2_supers, etc) we want to
2321 * copy this out to a temporary lockres while still
2322 * under the spinlock. Obviously after this we can't
2323 * trust any pointers on the copy returned, but that's
2324 * ok as the information we want isn't typically held
2326 priv->p_tmp_res = *iter;
2327 iter = &priv->p_tmp_res;
2329 spin_unlock(&ocfs2_dlm_tracking_lock);
2334 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
2338 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
2340 struct ocfs2_dlm_seq_priv *priv = m->private;
2341 struct ocfs2_lock_res *iter = v;
2342 struct ocfs2_lock_res *dummy = &priv->p_iter_res;
2344 spin_lock(&ocfs2_dlm_tracking_lock);
2345 iter = ocfs2_dlm_next_res(iter, priv);
2346 list_del_init(&dummy->l_debug_list);
2348 list_add(&dummy->l_debug_list, &iter->l_debug_list);
2349 priv->p_tmp_res = *iter;
2350 iter = &priv->p_tmp_res;
2352 spin_unlock(&ocfs2_dlm_tracking_lock);
2357 /* So that debugfs.ocfs2 can determine which format is being used */
2358 #define OCFS2_DLM_DEBUG_STR_VERSION 1
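/*
 * Each record emitted below is a single tab-separated line: the version
 * tag above, the lock name (dentry locks have their inode number
 * appended in hex), a series of lock state fields (holder counts,
 * requested and blocking levels, etc.), and finally the raw LVB dumped
 * byte by byte in hex.
 */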
2359 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2363 struct ocfs2_lock_res *lockres = v;
2368 seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
2370 if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
2371 seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
2373 (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
2375 seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
2377 seq_printf(m, "%d\t"
2388 lockres->l_unlock_action,
2389 lockres->l_ro_holders,
2390 lockres->l_ex_holders,
2391 lockres->l_requested,
2392 lockres->l_blocking);
2394 /* Dump the raw LVB */
2395 lvb = lockres->l_lksb.lvb;
2396 for(i = 0; i < DLM_LVB_LEN; i++)
2397 seq_printf(m, "0x%x\t", lvb[i]);
2400 seq_printf(m, "\n");
2404 static const struct seq_operations ocfs2_dlm_seq_ops = {
2405 .start = ocfs2_dlm_seq_start,
2406 .stop = ocfs2_dlm_seq_stop,
2407 .next = ocfs2_dlm_seq_next,
2408 .show = ocfs2_dlm_seq_show,
2411 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
2413 struct seq_file *seq = (struct seq_file *) file->private_data;
2414 struct ocfs2_dlm_seq_priv *priv = seq->private;
2415 struct ocfs2_lock_res *res = &priv->p_iter_res;
2417 ocfs2_remove_lockres_tracking(res);
2418 ocfs2_put_dlm_debug(priv->p_dlm_debug);
2419 return seq_release_private(inode, file);
2422 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
2425 struct ocfs2_dlm_seq_priv *priv;
2426 struct seq_file *seq;
2427 struct ocfs2_super *osb;
2429 priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);
2435 osb = inode->i_private;
2436 ocfs2_get_dlm_debug(osb->osb_dlm_debug);
2437 priv->p_dlm_debug = osb->osb_dlm_debug;
2438 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
2440 ret = seq_open(file, &ocfs2_dlm_seq_ops);
2447 seq = (struct seq_file *) file->private_data;
2448 seq->private = priv;
2450 ocfs2_add_lockres_tracking(&priv->p_iter_res,
2457 static const struct file_operations ocfs2_dlm_debug_fops = {
2458 .open = ocfs2_dlm_debug_open,
2459 .release = ocfs2_dlm_debug_release,
2461 .llseek = seq_lseek,
2464 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
2467 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2469 dlm_debug->d_locking_state = debugfs_create_file("locking_state",
2471 osb->osb_debug_root,
2473 &ocfs2_dlm_debug_fops);
2474 if (!dlm_debug->d_locking_state) {
2477 "Unable to create locking state debugfs file.\n");
2481 ocfs2_get_dlm_debug(dlm_debug);
2486 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
2488 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2491 debugfs_remove(dlm_debug->d_locking_state);
2492 ocfs2_put_dlm_debug(dlm_debug);
2496 int ocfs2_dlm_init(struct ocfs2_super *osb)
2500 struct dlm_ctxt *dlm = NULL;
2504 if (ocfs2_mount_local(osb))
2507 status = ocfs2_dlm_init_debug(osb);
2513 /* launch downconvert thread */
2514 osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
2515 if (IS_ERR(osb->dc_task)) {
2516 status = PTR_ERR(osb->dc_task);
2517 osb->dc_task = NULL;
2522 /* used by the dlm code to make message headers unique, each
2523 * node in this domain must agree on this. */
2524 dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str));
2526 /* for now, uuid == domain */
2527 dlm = dlm_register_domain(osb->uuid_str, dlm_key,
2528 &osb->osb_locking_proto);
2530 status = PTR_ERR(dlm);
2535 dlm_register_eviction_cb(dlm, &osb->osb_eviction_cb);
2538 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
2539 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
2546 ocfs2_dlm_shutdown_debug(osb);
2548 kthread_stop(osb->dc_task);
2555 void ocfs2_dlm_shutdown(struct ocfs2_super *osb)
2559 dlm_unregister_eviction_cb(&osb->osb_eviction_cb);
2561 ocfs2_drop_osb_locks(osb);
2564 kthread_stop(osb->dc_task);
2565 osb->dc_task = NULL;
2568 ocfs2_lock_res_free(&osb->osb_super_lockres);
2569 ocfs2_lock_res_free(&osb->osb_rename_lockres);
2571 dlm_unregister_domain(osb->dlm);
2574 ocfs2_dlm_shutdown_debug(osb);
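/*
 * Teardown order above: unregister the eviction callback, drop the
 * osb-wide locks, stop the downconvert thread, free the super and
 * rename lock resources, then unregister the dlm domain and tear down
 * the debugfs state.
 */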
2579 static void ocfs2_unlock_ast(void *opaque, enum dlm_status status)
2581 struct ocfs2_lock_res *lockres = opaque;
2582 unsigned long flags;
2586 mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
2587 lockres->l_unlock_action);
2589 spin_lock_irqsave(&lockres->l_lock, flags);
2590 /* We tried to cancel a convert request, but it was already
2591 * granted. All we want to do here is clear our unlock
2592 * state. The wake_up call done at the bottom is redundant
2593 * (ocfs2_prepare_cancel_convert doesn't sleep on this) but doesn't
2594 * hurt anything anyway */
2595 if (status == DLM_CANCELGRANT &&
2596 lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2597 mlog(0, "Got cancelgrant for %s\n", lockres->l_name);
2599 /* We don't clear the busy flag in this case as it
2600 * should have been cleared by the ast which the dlm
2602 goto complete_unlock;
2605 if (status != DLM_NORMAL) {
2606 mlog(ML_ERROR, "Dlm passes status %d for lock %s, "
2607 "unlock_action %d\n", status, lockres->l_name,
2608 lockres->l_unlock_action);
2609 spin_unlock_irqrestore(&lockres->l_lock, flags);
2613 switch(lockres->l_unlock_action) {
2614 case OCFS2_UNLOCK_CANCEL_CONVERT:
2615 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
2616 lockres->l_action = OCFS2_AST_INVALID;
2618 case OCFS2_UNLOCK_DROP_LOCK:
2619 lockres->l_level = LKM_IVMODE;
2625 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
2627 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
2628 spin_unlock_irqrestore(&lockres->l_lock, flags);
2630 wake_up(&lockres->l_event);
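/*
 * To summarize the unlock AST: DLM_CANCELGRANT on a pending cancel
 * means the convert completed before we could cancel it, so only the
 * unlock state is reset. Otherwise OCFS2_UNLOCK_CANCEL_CONVERT resets
 * l_action and OCFS2_UNLOCK_DROP_LOCK drops l_level to LKM_IVMODE; in
 * those two cases OCFS2_LOCK_BUSY is also cleared. Either way
 * l_unlock_action is reset and waiters on l_event are woken.
 */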
2635 static int ocfs2_drop_lock(struct ocfs2_super *osb,
2636 struct ocfs2_lock_res *lockres)
2638 enum dlm_status status;
2639 unsigned long flags;
2642 /* We didn't get anywhere near actually using this lockres. */
2643 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
2646 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
2647 lkm_flags |= LKM_VALBLK;
2649 spin_lock_irqsave(&lockres->l_lock, flags);
2651 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
2652 "lockres %s, flags 0x%lx\n",
2653 lockres->l_name, lockres->l_flags);
2655 while (lockres->l_flags & OCFS2_LOCK_BUSY) {
2656 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
2657 "%u, unlock_action = %u\n",
2658 lockres->l_name, lockres->l_flags, lockres->l_action,
2659 lockres->l_unlock_action);
2661 spin_unlock_irqrestore(&lockres->l_lock, flags);
2663 /* XXX: Today we just wait on any busy
2664 * locks... Perhaps we need to cancel converts in the
2666 ocfs2_wait_on_busy_lock(lockres);
2668 spin_lock_irqsave(&lockres->l_lock, flags);
2671 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
2672 if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
2673 lockres->l_level == LKM_EXMODE &&
2674 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
2675 lockres->l_ops->set_lvb(lockres);
2678 if (lockres->l_flags & OCFS2_LOCK_BUSY)
2679 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
2681 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
2682 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
2684 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
2685 spin_unlock_irqrestore(&lockres->l_lock, flags);
2689 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
2691 /* make sure we never get here while waiting for an ast to
2693 BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
2695 /* is this necessary? */
2696 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2697 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
2698 spin_unlock_irqrestore(&lockres->l_lock, flags);
2700 mlog(0, "lock %s\n", lockres->l_name);
2702 status = dlmunlock(osb->dlm, &lockres->l_lksb, lkm_flags,
2703 ocfs2_unlock_ast, lockres);
2704 if (status != DLM_NORMAL) {
2705 ocfs2_log_dlm_error("dlmunlock", status, lockres);
2706 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
2707 dlm_print_one_lock(lockres->l_lksb.lockid);
2710 mlog(0, "lock %s, successfull return from dlmunlock\n",
2713 ocfs2_wait_on_busy_lock(lockres);
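/*
 * To recap ocfs2_drop_lock(): wait until the lock is no longer busy,
 * push a final LVB if we hold EX and no refresh is pending, clear
 * OCFS2_LOCK_ATTACHED, mark the lock busy with OCFS2_UNLOCK_DROP_LOCK,
 * issue dlmunlock() and then wait for ocfs2_unlock_ast() to clear the
 * busy flag again.
 */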
2719 /* Mark the lockres as being dropped. It will no longer be
2720 * queued if blocking, but we still may have to wait on it
2721 * being dequeued from the downconvert thread before we can consider
2724 * You can *not* attempt to call cluster_lock on this lockres anymore. */
2725 void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
2728 struct ocfs2_mask_waiter mw;
2729 unsigned long flags;
2731 ocfs2_init_mask_waiter(&mw);
2733 spin_lock_irqsave(&lockres->l_lock, flags);
2734 lockres->l_flags |= OCFS2_LOCK_FREEING;
2735 while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
2736 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
2737 spin_unlock_irqrestore(&lockres->l_lock, flags);
2739 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
2741 status = ocfs2_wait_for_mask(&mw);
2745 spin_lock_irqsave(&lockres->l_lock, flags);
2747 spin_unlock_irqrestore(&lockres->l_lock, flags);
2750 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
2751 struct ocfs2_lock_res *lockres)
2755 ocfs2_mark_lockres_freeing(lockres);
2756 ret = ocfs2_drop_lock(osb, lockres);
2761 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
2763 ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
2764 ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
2767 int ocfs2_drop_inode_locks(struct inode *inode)
2773 /* No need to call ocfs2_mark_lockres_freeing here -
2774 * ocfs2_clear_inode has done it for us. */
2776 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2777 &OCFS2_I(inode)->ip_open_lockres);
2783 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2784 &OCFS2_I(inode)->ip_inode_lockres);
2787 if (err < 0 && !status)
2790 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2791 &OCFS2_I(inode)->ip_rw_lockres);
2794 if (err < 0 && !status)
2801 static void ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
2804 assert_spin_locked(&lockres->l_lock);
2806 BUG_ON(lockres->l_blocking <= LKM_NLMODE);
2808 if (lockres->l_level <= new_level) {
2809 mlog(ML_ERROR, "lockres->l_level (%u) <= new_level (%u)\n",
2810 lockres->l_level, new_level);
2814 mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
2815 lockres->l_name, new_level, lockres->l_blocking);
2817 lockres->l_action = OCFS2_AST_DOWNCONVERT;
2818 lockres->l_requested = new_level;
2819 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2822 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
2823 struct ocfs2_lock_res *lockres,
2827 int ret, dlm_flags = LKM_CONVERT;
2828 enum dlm_status status;
2833 dlm_flags |= LKM_VALBLK;
2835 status = dlmlock(osb->dlm,
2840 OCFS2_LOCK_ID_MAX_LEN - 1,
2843 ocfs2_blocking_ast);
2844 if (status != DLM_NORMAL) {
2845 ocfs2_log_dlm_error("dlmlock", status, lockres);
2847 ocfs2_recover_from_dlm_error(lockres, 1);
2857 /* returns 1 when the caller should unlock and call dlmunlock */
2858 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
2859 struct ocfs2_lock_res *lockres)
2861 assert_spin_locked(&lockres->l_lock);
2864 mlog(0, "lock %s\n", lockres->l_name);
2866 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2867 /* If we're already trying to cancel a lock conversion
2868 * then just drop the spinlock and allow the caller to
2869 * requeue this lock. */
2871 mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
2875 /* were we in a convert when we got the bast fire? */
2876 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
2877 lockres->l_action != OCFS2_AST_DOWNCONVERT);
2878 /* set things up for the unlockast to know to just
2879 * clear out the ast_action and unset busy, etc. */
2880 lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
2882 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
2883 "lock %s, invalid flags: 0x%lx\n",
2884 lockres->l_name, lockres->l_flags);
2889 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
2890 struct ocfs2_lock_res *lockres)
2893 enum dlm_status status;
2896 mlog(0, "lock %s\n", lockres->l_name);
2899 status = dlmunlock(osb->dlm,
2904 if (status != DLM_NORMAL) {
2905 ocfs2_log_dlm_error("dlmunlock", status, lockres);
2907 ocfs2_recover_from_dlm_error(lockres, 0);
2910 mlog(0, "lock %s return from dlmunlock\n", lockres->l_name);
2916 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
2917 struct ocfs2_lock_res *lockres,
2918 struct ocfs2_unblock_ctl *ctl)
2920 unsigned long flags;
2928 spin_lock_irqsave(&lockres->l_lock, flags);
2930 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
2933 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
2935 ret = ocfs2_prepare_cancel_convert(osb, lockres);
2936 spin_unlock_irqrestore(&lockres->l_lock, flags);
2938 ret = ocfs2_cancel_convert(osb, lockres);
2945 /* if we're blocking an exclusive and we have *any* holders,
2947 if ((lockres->l_blocking == LKM_EXMODE)
2948 && (lockres->l_ex_holders || lockres->l_ro_holders))
2951 /* If it's a PR we're blocking, then only
2952 * requeue if we've got any EX holders */
2953 if (lockres->l_blocking == LKM_PRMODE &&
2954 lockres->l_ex_holders)
2958 * Can we get a lock in this state if the holder counts are
2959 * zero? The meta data unblock code used to check this.
2961 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
2962 && (lockres->l_flags & OCFS2_LOCK_REFRESHING))
2965 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
2967 if (lockres->l_ops->check_downconvert
2968 && !lockres->l_ops->check_downconvert(lockres, new_level))
2971 /* If we get here, then we know that there are no more
2972 * incompatible holders (and anyone asking for an incompatible
2973 * lock is blocked). We can now downconvert the lock */
2974 if (!lockres->l_ops->downconvert_worker)
2977 /* Some lockres types want to do a bit of work before
2978 * downconverting a lock. Allow that here. The worker function
2979 * may sleep, so we save off a copy of what we're blocking as
2980 * it may change while we're not holding the spin lock. */
2981 blocking = lockres->l_blocking;
2982 spin_unlock_irqrestore(&lockres->l_lock, flags);
2984 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
2986 if (ctl->unblock_action == UNBLOCK_STOP_POST)
2989 spin_lock_irqsave(&lockres->l_lock, flags);
2990 if (blocking != lockres->l_blocking) {
2991 /* If this changed underneath us, then we can't drop
2999 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3000 if (lockres->l_level == LKM_EXMODE)
3004 * We only set the lvb if the lock has been fully
3005 * refreshed - otherwise we risk setting stale
3006 * data. If we don't set it, there's no need to actually clear
3007 * out the lvb here as its value is still valid.
3009 if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3010 lockres->l_ops->set_lvb(lockres);
3013 ocfs2_prepare_downconvert(lockres, new_level);
3014 spin_unlock_irqrestore(&lockres->l_lock, flags);
3015 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb);
3021 spin_unlock_irqrestore(&lockres->l_lock, flags);
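/*
 * In short, ocfs2_unblock_lock() does one of three things: cancel an
 * in-flight convert if the lock is busy, requeue if incompatible
 * holders (or a refresh in progress) still block the downconvert, or
 * run the per-type downconvert_worker and convert down to the highest
 * level compatible with l_blocking, pushing the LVB when dropping from
 * EX on a fully refreshed lock.
 */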
3028 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
3031 struct inode *inode;
3032 struct address_space *mapping;
3034 inode = ocfs2_lock_res_inode(lockres);
3035 mapping = inode->i_mapping;
3037 if (!S_ISREG(inode->i_mode))
3041 * We need this before the filemap_fdatawrite() so that it can
3042 * transfer the dirty bit from the PTE to the
3043 * page. Unfortunately this means that even for EX->PR
3044 * downconverts, we'll lose our mappings and have to build
3047 unmap_mapping_range(mapping, 0, 0, 0);
3049 if (filemap_fdatawrite(mapping)) {
3050 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
3051 (unsigned long long)OCFS2_I(inode)->ip_blkno);
3053 sync_mapping_buffers(mapping);
3054 if (blocking == LKM_EXMODE) {
3055 truncate_inode_pages(mapping, 0);
3057 /* We only need to wait on the I/O if we're not also
3058 * truncating pages because truncate_inode_pages waits
3059 * for us above. We don't truncate pages if we're
3060 * blocking anything < EXMODE because we want to keep
3061 * them around in that case. */
3062 filemap_fdatawait(mapping);
3066 return UNBLOCK_CONTINUE;
3069 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
3072 struct inode *inode = ocfs2_lock_res_inode(lockres);
3073 int checkpointed = ocfs2_inode_fully_checkpointed(inode);
3075 BUG_ON(new_level != LKM_NLMODE && new_level != LKM_PRMODE);
3076 BUG_ON(lockres->l_level != LKM_EXMODE && !checkpointed);
3081 ocfs2_start_checkpoint(OCFS2_SB(inode->i_sb));
3085 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
3087 struct inode *inode = ocfs2_lock_res_inode(lockres);
3089 __ocfs2_stuff_meta_lvb(inode);
3093 * Does the final reference drop on our dentry lock. Right now this
3094 * happens in the downconvert thread, but we could choose to simplify the
3095 * dlmglue API and push these off to the ocfs2_wq in the future.
3097 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
3098 struct ocfs2_lock_res *lockres)
3100 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3101 ocfs2_dentry_lock_put(osb, dl);
3105 * d_delete() matching dentries before the lock downconvert.
3107 * At this point, any process waiting to destroy the
3108 * dentry_lock due to last ref count is stopped by the
3109 * OCFS2_LOCK_QUEUED flag.
3111 * We have two potential problems
3113 * 1) If we do the last reference drop on our dentry_lock (via dput)
3114 * we'll wind up in ocfs2_release_dentry_lock(), waiting on
3115 * the downconvert to finish. Instead we take an elevated
3116 * reference and push the drop until after we've completed our
3117 * unblock processing.
3119 * 2) There might be another process with a final reference,
3120 * waiting on us to finish processing. If this is the case, we
3121 * detect it and exit out - there are no more dentries anyway.
3123 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
3126 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3127 struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
3128 struct dentry *dentry;
3129 unsigned long flags;
3133 * This node is blocking another node from getting a read
3134 * lock. This happens when we've renamed within a
3135 * directory. We've forced the other nodes to d_delete(), but
3136 * we never actually dropped our lock because it's still
3137 * valid. The downconvert code will retain a PR for this node,
3138 * so there's no further work to do.
3140 if (blocking == LKM_PRMODE)
3141 return UNBLOCK_CONTINUE;
3144 * Mark this inode as potentially orphaned. The code in
3145 * ocfs2_delete_inode() will figure out whether it actually
3146 * needs to be freed or not.
3148 spin_lock(&oi->ip_lock);
3149 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
3150 spin_unlock(&oi->ip_lock);
3153 * Yuck. We need to make sure however that the check of
3154 * OCFS2_LOCK_FREEING and the extra reference are atomic with
3155 * respect to a reference decrement or the setting of that
3158 spin_lock_irqsave(&lockres->l_lock, flags);
3159 spin_lock(&dentry_attach_lock);
3160 if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
3165 spin_unlock(&dentry_attach_lock);
3166 spin_unlock_irqrestore(&lockres->l_lock, flags);
3168 mlog(0, "extra_ref = %d\n", extra_ref);
3171 * We have a process waiting on us in ocfs2_dentry_iput(),
3172 * which means we can't have any more outstanding
3173 * aliases. There's no need to do any more work.
3176 return UNBLOCK_CONTINUE;
3178 spin_lock(&dentry_attach_lock);
3180 dentry = ocfs2_find_local_alias(dl->dl_inode,
3181 dl->dl_parent_blkno, 1);
3184 spin_unlock(&dentry_attach_lock);
3186 mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
3187 dentry->d_name.name);
3190 * The following dcache calls may do an
3191 * iput(). Normally we don't want that from the
3192 * downconverting thread, but in this case it's ok
3193 * because the requesting node already has an
3194 * exclusive lock on the inode, so it can't be queued
3195 * for a downconvert.
3200 spin_lock(&dentry_attach_lock);
3202 spin_unlock(&dentry_attach_lock);
3205 * If we are the last holder of this dentry lock, there is no
3206 * reason to downconvert so skip straight to the unlock.
3208 if (dl->dl_count == 1)
3209 return UNBLOCK_STOP_POST;
3211 return UNBLOCK_CONTINUE_POST;
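/*
 * Return values above: UNBLOCK_CONTINUE when the other node only wants
 * PR or when another process is already waiting on our final dentry
 * lock reference; UNBLOCK_STOP_POST when we hold the last reference
 * ourselves, since there is no point downconverting and the post_unlock
 * callback will drop it; UNBLOCK_CONTINUE_POST otherwise, so the
 * downconvert proceeds and the elevated reference taken here is
 * released afterwards.
 */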
3214 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3215 struct ocfs2_lock_res *lockres)
3218 struct ocfs2_unblock_ctl ctl = {0, 0,};
3219 unsigned long flags;
3221 /* Our reference to the lockres in this function can be
3222 * considered valid until we remove the OCFS2_LOCK_QUEUED
3228 BUG_ON(!lockres->l_ops);
3230 mlog(0, "lockres %s blocked.\n", lockres->l_name);
3232 /* Detect whether a lock has been marked as going away while
3233 * the downconvert thread was processing other things. A lock can
3234 * still be marked with OCFS2_LOCK_FREEING after this check,
3235 * but short circuiting here will still save us some
3237 spin_lock_irqsave(&lockres->l_lock, flags);
3238 if (lockres->l_flags & OCFS2_LOCK_FREEING)
3240 spin_unlock_irqrestore(&lockres->l_lock, flags);
3242 status = ocfs2_unblock_lock(osb, lockres, &ctl);
3246 spin_lock_irqsave(&lockres->l_lock, flags);
3248 if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
3249 lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
3251 ocfs2_schedule_blocked_lock(osb, lockres);
3253 mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
3254 ctl.requeue ? "yes" : "no");
3255 spin_unlock_irqrestore(&lockres->l_lock, flags);
3257 if (ctl.unblock_action != UNBLOCK_CONTINUE
3258 && lockres->l_ops->post_unlock)
3259 lockres->l_ops->post_unlock(osb, lockres);
3264 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
3265 struct ocfs2_lock_res *lockres)
3269 assert_spin_locked(&lockres->l_lock);
3271 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
3272 /* Do not schedule a lock for downconvert when it's on
3273 * the way to destruction - any nodes wanting access
3274 * to the resource will get it soon. */
3275 mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
3276 lockres->l_name, lockres->l_flags);
3280 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
3282 spin_lock(&osb->dc_task_lock);
3283 if (list_empty(&lockres->l_blocked_list)) {
3284 list_add_tail(&lockres->l_blocked_list,
3285 &osb->blocked_lock_list);
3286 osb->blocked_lock_count++;
3288 spin_unlock(&osb->dc_task_lock);
3293 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
3295 unsigned long processed;
3296 struct ocfs2_lock_res *lockres;
3300 spin_lock(&osb->dc_task_lock);
3301 /* grab this early so we know to try again if a state change and
3302 * wake-up happen part-way through our work */
3303 osb->dc_work_sequence = osb->dc_wake_sequence;
3305 processed = osb->blocked_lock_count;
3307 BUG_ON(list_empty(&osb->blocked_lock_list));
3309 lockres = list_entry(osb->blocked_lock_list.next,
3310 struct ocfs2_lock_res, l_blocked_list);
3311 list_del_init(&lockres->l_blocked_list);
3312 osb->blocked_lock_count--;
3313 spin_unlock(&osb->dc_task_lock);
3318 ocfs2_process_blocked_lock(osb, lockres);
3320 spin_lock(&osb->dc_task_lock);
3322 spin_unlock(&osb->dc_task_lock);
3327 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
3331 spin_lock(&osb->dc_task_lock);
3332 if (list_empty(&osb->blocked_lock_list))
3335 spin_unlock(&osb->dc_task_lock);
3339 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
3341 int should_wake = 0;
3343 spin_lock(&osb->dc_task_lock);
3344 if (osb->dc_work_sequence != osb->dc_wake_sequence)
3346 spin_unlock(&osb->dc_task_lock);
3351 static int ocfs2_downconvert_thread(void *arg)
3354 struct ocfs2_super *osb = arg;
3356 /* only quit once we've been asked to stop and there is no more
3358 while (!(kthread_should_stop() &&
3359 ocfs2_downconvert_thread_lists_empty(osb))) {
3361 wait_event_interruptible(osb->dc_event,
3362 ocfs2_downconvert_thread_should_wake(osb) ||
3363 kthread_should_stop());
3365 mlog(0, "downconvert_thread: awoken\n");
3367 ocfs2_downconvert_thread_do_work(osb);
3370 osb->dc_task = NULL;
3374 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
3376 spin_lock(&osb->dc_task_lock);
3377 /* make sure the downconvert thread gets a swipe at whatever changes
3378 * the caller may have made to the lockres state */
3379 osb->dc_wake_sequence++;
3380 spin_unlock(&osb->dc_task_lock);
3381 wake_up(&osb->dc_event);