/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include "lock_dlm.h"

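/* scratch value block assigned to the "null" NL locks taken by
   hold_null_lock(); the module never reads its contents */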
static char junk_lvb[GDLM_LVB_SIZE];


/* convert dlm lock-mode to gfs lock-state */

static s16 gdlm_make_lmstate(s16 dlmmode)
{
        switch (dlmmode) {
        case DLM_LOCK_IV:
        case DLM_LOCK_NL:
                return LM_ST_UNLOCKED;
        case DLM_LOCK_EX:
                return LM_ST_EXCLUSIVE;
        case DLM_LOCK_CW:
                return LM_ST_DEFERRED;
        case DLM_LOCK_PR:
                return LM_ST_SHARED;
        }
        gdlm_assert(0, "unknown DLM mode %d", dlmmode);
        return -1;
}

/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
   thread gets to it. */

static void queue_submit(struct gdlm_lock *lp)
{
        struct gdlm_ls *ls = lp->ls;

        spin_lock(&ls->async_lock);
        list_add_tail(&lp->delay_list, &ls->submit);
        spin_unlock(&ls->async_lock);
        wake_up(&ls->thread_wait);
}

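/* clear LFL_AST_WAIT and wake any thread sleeping on that bit (pairs with
   the wait_on_bit() in hold_null_lock()) */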
static void wake_up_ast(struct gdlm_lock *lp)
{
        clear_bit(LFL_AST_WAIT, &lp->flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&lp->flags, LFL_AST_WAIT);
}

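/* remove the lock from the delayed/submit queues and free it */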
static void gdlm_delete_lp(struct gdlm_lock *lp)
{
        struct gdlm_ls *ls = lp->ls;

        spin_lock(&ls->async_lock);
        if (!list_empty(&lp->delay_list))
                list_del_init(&lp->delay_list);
        ls->all_locks_count--;
        spin_unlock(&ls->async_lock);

        kfree(lp);
}

static void gdlm_queue_delayed(struct gdlm_lock *lp)
{
        struct gdlm_ls *ls = lp->ls;

        spin_lock(&ls->async_lock);
        list_add_tail(&lp->delay_list, &ls->delayed);
        spin_unlock(&ls->async_lock);
}

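/* Process the completion of a dlm request.  Decode the status block, handle
   cancels, unlocks and recovery-time demotions, and finally report the
   result to GFS through the LM_CB_ASYNC callback. */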
static void process_complete(struct gdlm_lock *lp)
{
        struct gdlm_ls *ls = lp->ls;
        struct lm_async_cb acb;

        memset(&acb, 0, sizeof(acb));

        if (lp->lksb.sb_status == -DLM_ECANCEL) {
                log_info("complete dlm cancel %x,%llx flags %lx",
                         lp->lockname.ln_type,
                         (unsigned long long)lp->lockname.ln_number,
                         lp->flags);

                lp->req = lp->cur;
                acb.lc_ret |= LM_OUT_CANCELED;
                if (lp->cur == DLM_LOCK_IV)
                        lp->lksb.sb_lkid = 0;
                goto out;
        }

        if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
                if (lp->lksb.sb_status != -DLM_EUNLOCK) {
                        log_info("unlock sb_status %d %x,%llx flags %lx",
                                 lp->lksb.sb_status, lp->lockname.ln_type,
                                 (unsigned long long)lp->lockname.ln_number,
                                 lp->flags);
                        return;
                }

                lp->cur = DLM_LOCK_IV;
                lp->req = DLM_LOCK_IV;
                lp->lksb.sb_lkid = 0;

                if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
                        gdlm_delete_lp(lp);
                        return;
                }
                goto out;
        }

        if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
                memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

        if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
                if (lp->req == DLM_LOCK_PR)
                        lp->req = DLM_LOCK_CW;
                else if (lp->req == DLM_LOCK_CW)
                        lp->req = DLM_LOCK_PR;
        }

        /*
         * A canceled lock request.  The lock was just taken off the delayed
         * list and was never even submitted to dlm.
         */

        if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
                log_info("complete internal cancel %x,%llx",
                         lp->lockname.ln_type,
                         (unsigned long long)lp->lockname.ln_number);
                lp->req = lp->cur;
                acb.lc_ret |= LM_OUT_CANCELED;
                goto out;
        }

        /*
         * An error occurred.
         */

        if (lp->lksb.sb_status) {
                /* a "normal" error */
                if ((lp->lksb.sb_status == -EAGAIN) &&
                    (lp->lkf & DLM_LKF_NOQUEUE)) {
                        lp->req = lp->cur;
                        if (lp->cur == DLM_LOCK_IV)
                                lp->lksb.sb_lkid = 0;
                        goto out;
                }

                /* this could only happen with cancels I think */
                log_info("ast sb_status %d %x,%llx flags %lx",
                         lp->lksb.sb_status, lp->lockname.ln_type,
                         (unsigned long long)lp->lockname.ln_number,
                         lp->flags);
                return;
        }

        /*
         * This is an AST for an EX->EX conversion for sync_lvb from GFS.
         */

        if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
                wake_up_ast(lp);
                return;
        }

        /*
         * A lock has been demoted to NL because it initially completed during
         * BLOCK_LOCKS.  Now it must be requested in the originally requested
         * mode.
         */

        if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
                gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
                            lp->lockname.ln_type,
                            (unsigned long long)lp->lockname.ln_number);
                gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
                            lp->lockname.ln_type,
                            (unsigned long long)lp->lockname.ln_number);

                lp->cur = DLM_LOCK_NL;
                lp->req = lp->prev_req;
                lp->prev_req = DLM_LOCK_IV;
                lp->lkf &= ~DLM_LKF_CONVDEADLK;

                set_bit(LFL_NOCACHE, &lp->flags);

                if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
                    !test_bit(LFL_NOBLOCK, &lp->flags))
                        gdlm_queue_delayed(lp);
                else
                        queue_submit(lp);
                return;
        }

        /*
         * A request is granted during dlm recovery.  It may be granted
         * because the locks of a failed node were cleared.  In that case,
         * there may be inconsistent data beneath this lock and we must wait
         * for recovery to complete to use it.  When gfs recovery is done this
         * granted lock will be converted to NL and then reacquired in this
         * granted state.
         */

        if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
            !test_bit(LFL_NOBLOCK, &lp->flags) &&
            lp->req != DLM_LOCK_NL) {

                lp->cur = lp->req;
                lp->prev_req = lp->req;
                lp->req = DLM_LOCK_NL;
                lp->lkf |= DLM_LKF_CONVERT;
                lp->lkf &= ~DLM_LKF_CONVDEADLK;

                log_debug("rereq %x,%llx id %x %d,%d",
                          lp->lockname.ln_type,
                          (unsigned long long)lp->lockname.ln_number,
                          lp->lksb.sb_lkid, lp->cur, lp->req);

                set_bit(LFL_REREQUEST, &lp->flags);
                queue_submit(lp);
                return;
        }

        /*
         * DLM demoted the lock to NL before it was granted so GFS must be
         * told it cannot cache data for this lock.
         */

        if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
                set_bit(LFL_NOCACHE, &lp->flags);

out:
        /*
         * This is an internal lock_dlm lock
         */

        if (test_bit(LFL_INLOCK, &lp->flags)) {
                clear_bit(LFL_NOBLOCK, &lp->flags);
                lp->cur = lp->req;
                wake_up_ast(lp);
                return;
        }

        /*
         * Normal completion of a lock request.  Tell GFS it now has the lock.
         */

        clear_bit(LFL_NOBLOCK, &lp->flags);
        lp->cur = lp->req;

        acb.lc_name = lp->lockname;
        acb.lc_ret |= gdlm_make_lmstate(lp->cur);

        ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
}

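/* completion ast; called by dlm when a request finishes */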
static void gdlm_ast(void *astarg)
{
        struct gdlm_lock *lp = astarg;
        clear_bit(LFL_ACTIVE, &lp->flags);
        process_complete(lp);
}

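/* translate the mode a blocked request needs into the matching LM_CB_NEED_*
   callback and pass it up to GFS */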
static void process_blocking(struct gdlm_lock *lp, int bast_mode)
{
        struct gdlm_ls *ls = lp->ls;
        unsigned int cb = 0;

        switch (gdlm_make_lmstate(bast_mode)) {
        case LM_ST_EXCLUSIVE:
                cb = LM_CB_NEED_E;
                break;
        case LM_ST_DEFERRED:
                cb = LM_CB_NEED_D;
                break;
        case LM_ST_SHARED:
                cb = LM_CB_NEED_S;
                break;
        default:
                gdlm_assert(0, "unknown bast mode %u", bast_mode);
        }

        ls->fscb(ls->sdp, cb, &lp->lockname);
}

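/* blocking ast; called by dlm when this lock is blocking another request */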
static void gdlm_bast(void *astarg, int mode)
{
        struct gdlm_lock *lp = astarg;

        if (!mode) {
                printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
                        lp->lockname.ln_type,
                        (unsigned long long)lp->lockname.ln_number);
                return;
        }

        process_blocking(lp, mode);
}

/* convert gfs lock-state to dlm lock-mode */

static s16 make_mode(s16 lmstate)
{
        switch (lmstate) {
        case LM_ST_UNLOCKED:
                return DLM_LOCK_NL;
        case LM_ST_EXCLUSIVE:
                return DLM_LOCK_EX;
        case LM_ST_DEFERRED:
                return DLM_LOCK_CW;
        case LM_ST_SHARED:
                return DLM_LOCK_PR;
        }
        gdlm_assert(0, "unknown LM state %d", lmstate);
        return -1;
}

/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
   DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */

static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state)
{
        s16 cur = make_mode(cur_state);
        if (lp->cur != DLM_LOCK_IV)
                gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur);
}

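/* translate GFS lock flags and the requested transition into dlm lkf flags
   for dlm_lock() */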
static inline unsigned int make_flags(struct gdlm_lock *lp,
                                      unsigned int gfs_flags,
                                      s16 cur, s16 req)
{
        unsigned int lkf = 0;

        if (gfs_flags & LM_FLAG_TRY)
                lkf |= DLM_LKF_NOQUEUE;

        if (gfs_flags & LM_FLAG_TRY_1CB) {
                lkf |= DLM_LKF_NOQUEUE;
                lkf |= DLM_LKF_NOQUEUEBAST;
        }

        if (gfs_flags & LM_FLAG_PRIORITY) {
                lkf |= DLM_LKF_NOORDER;
                lkf |= DLM_LKF_HEADQUE;
        }

        if (gfs_flags & LM_FLAG_ANY) {
                if (req == DLM_LOCK_PR)
                        lkf |= DLM_LKF_ALTCW;
                else if (req == DLM_LOCK_CW)
                        lkf |= DLM_LKF_ALTPR;
        }

        if (lp->lksb.sb_lkid != 0)
                lkf |= DLM_LKF_CONVERT;

        if (lp->lvb)
                lkf |= DLM_LKF_VALBLK;

        return lkf;
}

/* make_strname - convert GFS lock numbers to a string */

static inline void make_strname(const struct lm_lockname *lockname,
                                struct gdlm_strname *str)
{
        sprintf(str->name, "%8x%16llx", lockname->ln_type,
                (unsigned long long)lockname->ln_number);
        str->namelen = GDLM_STRNAME_BYTES;
}

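/* For example (illustrative values only): ln_type 0x2, ln_number 0x17 would
   yield the space-padded, 8+16 character resource name
   "       2              17". */

/* allocate and initialize a gdlm_lock; the lock starts out in DLM_LOCK_IV,
   meaning no dlm lock exists for it yet */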
static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
                          struct gdlm_lock **lpp)
{
        struct gdlm_lock *lp;

        lp = kzalloc(sizeof(struct gdlm_lock), GFP_NOFS);
        if (!lp)
                return -ENOMEM;

        lp->lockname = *name;
        make_strname(name, &lp->strname);
        lp->ls = ls;
        lp->cur = DLM_LOCK_IV;
        INIT_LIST_HEAD(&lp->delay_list);

        spin_lock(&ls->async_lock);
        ls->all_locks_count++;
        spin_unlock(&ls->async_lock);

        *lpp = lp;
        return 0;
}

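/* lm_lockops entry points for allocating and freeing a lock structure */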
int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
                  void **lockp)
{
        struct gdlm_lock *lp;
        int error;

        error = gdlm_create_lp(lockspace, name, &lp);

        *lockp = lp;
        return error;
}

void gdlm_put_lock(void *lock)
{
        gdlm_delete_lp(lock);
}

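/* submit the request described by lp->req and lp->lkf to dlm; the result
   arrives asynchronously through gdlm_ast() */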
unsigned int gdlm_do_lock(struct gdlm_lock *lp)
{
        struct gdlm_ls *ls = lp->ls;
        int error, bast = 1;

        /*
         * When recovery is in progress, delay lock requests so they are
         * submitted once recovery is done.  Requests for recovery (NOEXP)
         * and unlocks can pass.
         */

        if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
            !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) {
                gdlm_queue_delayed(lp);
                return LM_OUT_ASYNC;
        }

        /*
         * Submit the actual lock request.
         */

        if (test_bit(LFL_NOBAST, &lp->flags))
                bast = 0;

        set_bit(LFL_ACTIVE, &lp->flags);

        log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type,
                  (unsigned long long)lp->lockname.ln_number, lp->lksb.sb_lkid,
                  lp->cur, lp->req, lp->lkf);

        error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
                         lp->strname.name, lp->strname.namelen, 0, gdlm_ast,
                         lp, bast ? gdlm_bast : NULL);

        if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
                lp->lksb.sb_status = -EAGAIN;
                gdlm_ast(lp);
                error = 0;
        }

        if (error) {
                log_error("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
                          "flags=%lx", ls->fsname, lp->lockname.ln_type,
                          (unsigned long long)lp->lockname.ln_number, error,
                          lp->cur, lp->req, lp->lkf, lp->flags);
                return LM_OUT_ERROR;
        }
        return LM_OUT_ASYNC;
}

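/* ask dlm to release the lock; completion is handled in process_complete()
   with LFL_DLM_UNLOCK set */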
static unsigned int gdlm_do_unlock(struct gdlm_lock *lp)
{
        struct gdlm_ls *ls = lp->ls;
        unsigned int lkf = 0;
        int error;

        set_bit(LFL_DLM_UNLOCK, &lp->flags);
        set_bit(LFL_ACTIVE, &lp->flags);

        if (lp->lvb)
                lkf = DLM_LKF_VALBLK;

        log_debug("un %x,%llx %x %d %x", lp->lockname.ln_type,
                  (unsigned long long)lp->lockname.ln_number,
                  lp->lksb.sb_lkid, lp->cur, lkf);

        error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp);

        if (error) {
                log_error("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
                          "flags=%lx", ls->fsname, lp->lockname.ln_type,
                          (unsigned long long)lp->lockname.ln_number, error,
                          lp->cur, lp->req, lp->lkf, lp->flags);
                return LM_OUT_ERROR;
        }
        return LM_OUT_ASYNC;
}

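/* lm_lockops entry point: convert the lock from cur_state to req_state
   (a request for LM_ST_UNLOCKED is treated as an unlock) */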
unsigned int gdlm_lock(void *lock, unsigned int cur_state,
                       unsigned int req_state, unsigned int flags)
{
        struct gdlm_lock *lp = lock;

        if (req_state == LM_ST_UNLOCKED)
                return gdlm_unlock(lock, cur_state);

        clear_bit(LFL_DLM_CANCEL, &lp->flags);
        if (flags & LM_FLAG_NOEXP)
                set_bit(LFL_NOBLOCK, &lp->flags);

        check_cur_state(lp, cur_state);
        lp->req = make_mode(req_state);
        lp->lkf = make_flags(lp, flags, lp->cur, lp->req);

        return gdlm_do_lock(lp);
}

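/* lm_lockops entry point: drop the lock; a no-op if no dlm lock exists */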
unsigned int gdlm_unlock(void *lock, unsigned int cur_state)
{
        struct gdlm_lock *lp = lock;

        clear_bit(LFL_DLM_CANCEL, &lp->flags);
        if (lp->cur == DLM_LOCK_IV)
                return 0;
        return gdlm_do_unlock(lp);
}

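/* Cancel an in-progress request: either pull it off our own delayed list
   and complete it as canceled, or ask dlm to cancel it with DLM_LKF_CANCEL. */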
void gdlm_cancel(void *lock)
{
        struct gdlm_lock *lp = lock;
        struct gdlm_ls *ls = lp->ls;
        int error, delay_list = 0;

        if (test_bit(LFL_DLM_CANCEL, &lp->flags))
                return;

        log_info("gdlm_cancel %x,%llx flags %lx", lp->lockname.ln_type,
                 (unsigned long long)lp->lockname.ln_number, lp->flags);

        spin_lock(&ls->async_lock);
        if (!list_empty(&lp->delay_list)) {
                list_del_init(&lp->delay_list);
                delay_list = 1;
        }
        spin_unlock(&ls->async_lock);

        if (delay_list) {
                set_bit(LFL_CANCEL, &lp->flags);
                set_bit(LFL_ACTIVE, &lp->flags);
                gdlm_ast(lp);
                return;
        }

        if (!test_bit(LFL_ACTIVE, &lp->flags) ||
            test_bit(LFL_DLM_UNLOCK, &lp->flags)) {
                log_info("gdlm_cancel skip %x,%llx flags %lx",
                         lp->lockname.ln_type,
                         (unsigned long long)lp->lockname.ln_number, lp->flags);
                return;
        }

        /* the lock is blocked in the dlm */

        set_bit(LFL_DLM_CANCEL, &lp->flags);
        set_bit(LFL_ACTIVE, &lp->flags);

        error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL,
                           NULL, lp);

        log_info("gdlm_cancel rv %d %x,%llx flags %lx", error,
                 lp->lockname.ln_type,
                 (unsigned long long)lp->lockname.ln_number, lp->flags);

        if (error == -EBUSY)
                clear_bit(LFL_DLM_CANCEL, &lp->flags);
}

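/* allocate/free the lock value block that dlm accesses through sb_lvbptr */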
static int gdlm_add_lvb(struct gdlm_lock *lp)
{
        char *lvb;

        lvb = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
        if (!lvb)
                return -ENOMEM;

        lp->lksb.sb_lvbptr = lvb;
        lp->lvb = lvb;
        return 0;
}

static void gdlm_del_lvb(struct gdlm_lock *lp)
{
        kfree(lp->lvb);
        lp->lvb = NULL;
        lp->lksb.sb_lvbptr = NULL;
}

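/* action function for wait_on_bit(): reschedule until LFL_AST_WAIT is
   cleared by wake_up_ast() */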
static int gdlm_ast_wait(void *word)
{
        schedule();
        return 0;
}

/* This can do a synchronous dlm request (requiring a lock_dlm thread to get
   the completion) because gfs won't call hold_lvb() during a callback (from
   the context of a lock_dlm thread). */

static int hold_null_lock(struct gdlm_lock *lp)
{
        struct gdlm_lock *lpn = NULL;
        int error;

        if (lp->hold_null) {
                printk(KERN_INFO "lock_dlm: lvb already held\n");
                return 0;
        }

        error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn);
        if (error)
                goto out;

        lpn->lksb.sb_lvbptr = junk_lvb;
        lpn->lvb = junk_lvb;

        lpn->req = DLM_LOCK_NL;
        lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE;
        set_bit(LFL_NOBAST, &lpn->flags);
        set_bit(LFL_INLOCK, &lpn->flags);
        set_bit(LFL_AST_WAIT, &lpn->flags);

        gdlm_do_lock(lpn);
        wait_on_bit(&lpn->flags, LFL_AST_WAIT, gdlm_ast_wait,
                    TASK_UNINTERRUPTIBLE);
        error = lpn->lksb.sb_status;
        if (error) {
                printk(KERN_INFO "lock_dlm: hold_null_lock dlm error %d\n",
                       error);
                gdlm_delete_lp(lpn);
                lpn = NULL;
        }
out:
        lp->hold_null = lpn;
        return error;
}

/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to get
   the completion) because gfs may call unhold_lvb() during a callback (from
   the context of a lock_dlm thread) which could cause a deadlock since the
   other lock_dlm thread could be engaged in recovery. */

static void unhold_null_lock(struct gdlm_lock *lp)
{
        struct gdlm_lock *lpn = lp->hold_null;

        gdlm_assert(lpn, "%x,%llx", lp->lockname.ln_type,
                    (unsigned long long)lp->lockname.ln_number);
        lpn->lksb.sb_lvbptr = NULL;
        lpn->lvb = NULL;
        set_bit(LFL_UNLOCK_DELETE, &lpn->flags);
        gdlm_do_unlock(lpn);
        lp->hold_null = NULL;
}

/* Acquire a NL lock because gfs requires the value block to remain
   intact on the resource while the lvb is "held" even if it's holding no locks
   on the resource. */

int gdlm_hold_lvb(void *lock, char **lvbp)
{
        struct gdlm_lock *lp = lock;
        int error;

        error = gdlm_add_lvb(lp);
        if (error)
                return error;

        *lvbp = lp->lvb;

        error = hold_null_lock(lp);
        if (error)
                gdlm_del_lvb(lp);

        return error;
}

void gdlm_unhold_lvb(void *lock, char *lvb)
{
        struct gdlm_lock *lp = lock;

        unhold_null_lock(lp);
        gdlm_del_lvb(lp);
}

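/* move everything on the delayed list to the submit list and wake the
   lock_dlm thread */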
void gdlm_submit_delayed(struct gdlm_ls *ls)
{
        struct gdlm_lock *lp, *safe;

        spin_lock(&ls->async_lock);
        list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) {
                list_del_init(&lp->delay_list);
                list_add_tail(&lp->delay_list, &ls->submit);
        }
        spin_unlock(&ls->async_lock);
        wake_up(&ls->thread_wait);
}