1 /******************************************************************************
2 *******************************************************************************
4 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5 ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
7 ** This copyrighted material is made available to anyone wishing to use,
8 ** modify, copy, or redistribute it subject to the terms and conditions
9 ** of the GNU General Public License v.2.
11 *******************************************************************************
12 ******************************************************************************/
17 * This is the userland interface to the DLM.
19 * The locking is done via a misc char device (find the
20 * registered minor number in /proc/misc).
22 * User code should not use this interface directly but
23 * call the library routines in libdlm.a instead.
27 #include <linux/miscdevice.h>
28 #include <linux/init.h>
29 #include <linux/wait.h>
30 #include <linux/module.h>
31 #include <linux/file.h>
33 #include <linux/poll.h>
34 #include <linux/signal.h>
35 #include <linux/spinlock.h>
36 #include <linux/idr.h>
38 #include <linux/dlm.h>
39 #include <linux/dlm_device.h>
41 #include "lvb_table.h"
/* fops for the per-lockspace misc devices; initializer appears near the
   bottom of the file. */
43 static struct file_operations _dlm_fops;
/* Device-name prefix: lockspace devices are named "<prefix>_<lockspace>". */
44 static const char *name_prefix="dlm";
/* All registered userland lockspaces, protected by user_ls_lock. */
45 static struct list_head user_ls_list;
46 static struct mutex user_ls_lock;
48 /* Lock infos are stored in here indexed by lock ID */
49 static DEFINE_IDR(lockinfo_idr);
/* rwlock guarding lookups and updates of lockinfo_idr. */
50 static rwlock_t lockinfo_lock;
52 /* Flags in li_flags */
/* These are bit numbers (used with set_bit/test_bit), not masks. */
53 #define LI_FLAG_COMPLETE 1
54 #define LI_FLAG_FIRSTLOCK 2
55 #define LI_FLAG_PERSISTENT 3
56 #define LI_FLAG_ONLIST 4
58 /* flags in ls_flags*/
59 #define LS_FLAG_DELETED 1
60 #define LS_FLAG_AUTOFREE 2
/* Magic value stored in li_magic to validate lock_info pointers
   (ASCII "SYS$"). */
63 #define LOCKINFO_MAGIC 0x53595324
/* NOTE(review): only the body of struct lock_info is visible in this view;
 * the struct opening (and some members) are elided. */
/* Kernel-side lock status block; snapshotted into AST results. */
70 struct dlm_lksb li_lksb;
/* Woken by ast_routine for synchronous operations (device close). */
71 wait_queue_head_t li_waitq;
/* LI_FLAG_* bits. */
72 unsigned long li_flags;
/* Completion AST: userspace routine address and parameter. */
73 void __user *li_castparam;
74 void __user *li_castaddr;
/* Blocking AST currently in force. */
75 void __user *li_bastparam;
76 void __user *li_bastaddr;
/* Blocking AST to activate once a conversion completes successfully. */
77 void __user *li_pend_bastparam;
78 void __user *li_pend_bastaddr;
/* Linkage on the owning file's fi_li_list. */
79 struct list_head li_ownerqueue;
/* The open file (device instance) that owns this lock. */
80 struct file_info *li_file;
/* Userspace address of the caller's lksb, handed back in AST results. */
81 struct dlm_lksb __user *li_user_lksb;
/* Lets the AST routine wait until dlm_lock() has returned before it
   tears down a lock whose first request failed. */
82 struct completion li_firstcomp;
85 /* A queued AST no less */
/* Result record delivered to userspace via read() on the device. */
87 struct dlm_lock_result result;
/* Linkage on file_info.fi_ast_list. */
88 struct list_head list;
90 uint32_t progress; /* How much has been read */
93 /* One of these per userland lockspace */
99 /* Passed into misc_register() */
100 struct miscdevice ls_miscinfo;
/* Linkage on the global user_ls_list. */
101 struct list_head ls_list;
104 /* misc_device info for the control device */
105 static struct miscdevice ctl_device;
108 * Stuff we hang off the file struct.
109 * The first two are to cope with unlocking all the
110 * locks held by a process when it dies.
113 struct list_head fi_li_list; /* List of active lock_infos */
114 spinlock_t fi_li_lock;
115 struct list_head fi_ast_list; /* Queue of ASTs to be delivered */
116 spinlock_t fi_ast_lock;
/* Readers (dlm_read/dlm_poll) sleep here until an AST is queued. */
117 wait_queue_head_t fi_wait;
/* Lockspace this open file belongs to. */
118 struct user_ls *fi_ls;
119 atomic_t fi_refcnt; /* Number of users */
120 unsigned long fi_flags; /* Bit 1 means the device is open */
124 /* get and put ops for file_info.
125 Actually I don't really like "get" and "put", but everyone
126 else seems to use them and I can't think of anything
127 nicer at the moment */
/* Take a reference on a file_info. */
128 static void get_file_info(struct file_info *f)
130 atomic_inc(&f->fi_refcnt);
/* Drop a reference on a file_info; the action taken on the final put
 * (presumably freeing it) is elided from this view. */
133 static void put_file_info(struct file_info *f)
135 if (atomic_dec_and_test(&f->fi_refcnt))
/* Tear down a lock_info: drop its file reference, unmap its lock ID from
 * the IDR, free any LVB buffer, and release the module reference taken
 * when the lock_info was allocated. */
139 static void release_lockinfo(struct lock_info *li)
141 put_file_info(li->li_file);
143 write_lock(&lockinfo_lock);
144 idr_remove(&lockinfo_idr, li->li_lksb.sb_lkid);
145 write_unlock(&lockinfo_lock);
/* kfree(NULL) is a no-op, so this guard is redundant but harmless. */
147 if (li->li_lksb.sb_lvbptr)
148 kfree(li->li_lksb.sb_lvbptr);
151 module_put(THIS_MODULE);
/* Look up a lock_info by lock ID under the IDR read lock.
 * Returns NULL if the ID is not mapped (return elided in this view). */
154 static struct lock_info *get_lockinfo(uint32_t lockid)
156 struct lock_info *li;
158 read_lock(&lockinfo_lock);
159 li = idr_find(&lockinfo_idr, lockid);
160 read_unlock(&lockinfo_lock);
/* Map li into lockinfo_idr keyed by its lock ID (sb_lkid).
 * Fails if the ID is already mapped, or if the IDR cannot hand back the
 * exact requested ID (error paths partly elided in this view). */
165 static int add_lockinfo(struct lock_info *li)
171 write_lock(&lockinfo_lock);
/* Refuse duplicates: the lock ID must not already be present. */
173 if (idr_find(&lockinfo_idr, li->li_lksb.sb_lkid))
/* NOTE(review): GFP_KERNEL allocation while holding a rwlock can sleep
 * in atomic context — verify against the kernel's locking rules. */
177 r = idr_pre_get(&lockinfo_idr, GFP_KERNEL);
181 r = idr_get_new_above(&lockinfo_idr, li, li->li_lksb.sb_lkid, &n);
/* idr_get_new_above() may return a higher ID than requested; that is
 * treated as failure because the mapping must use sb_lkid exactly. */
185 if (n != li->li_lksb.sb_lkid) {
186 idr_remove(&lockinfo_idr, n);
193 write_unlock(&lockinfo_lock);
/* Walk user_ls_list for a lockspace with the given misc minor number.
 * Caller must hold user_ls_lock. */
199 static struct user_ls *__find_lockspace(int minor)
201 struct user_ls *lsinfo;
203 list_for_each_entry(lsinfo, &user_ls_list, ls_list) {
204 if (lsinfo->ls_miscinfo.minor == minor)
210 /* Find a lockspace struct given the device minor number */
/* Locked wrapper around __find_lockspace(). */
211 static struct user_ls *find_lockspace(int minor)
213 struct user_ls *lsinfo;
215 mutex_lock(&user_ls_lock);
216 lsinfo = __find_lockspace(minor);
217 mutex_unlock(&user_ls_lock);
/* Publish a newly registered lockspace on the global list. */
222 static void add_lockspace_to_list(struct user_ls *lsinfo)
224 mutex_lock(&user_ls_lock);
225 list_add(&lsinfo->ls_list, &user_ls_list);
226 mutex_unlock(&user_ls_lock);
229 /* Register a lockspace with the DLM and create a misc
230 device for userland to access it */
231 static int register_lockspace(char *name, struct user_ls **ls, int flags)
233 struct user_ls *newls;
/* "+2": one byte for the '_' separator, one for the trailing NUL. */
237 namelen = strlen(name)+strlen(name_prefix)+2;
239 newls = kzalloc(sizeof(struct user_ls), GFP_KERNEL);
243 newls->ls_miscinfo.name = kzalloc(namelen, GFP_KERNEL);
244 if (!newls->ls_miscinfo.name) {
/* Create the kernel-side lockspace before registering the device. */
249 status = dlm_new_lockspace(name, strlen(name), &newls->ls_lockspace, 0,
252 kfree(newls->ls_miscinfo.name);
/* Device is named "<prefix>_<lockspace>", e.g. "dlm_foo". */
257 snprintf((char*)newls->ls_miscinfo.name, namelen, "%s_%s",
260 newls->ls_miscinfo.fops = &_dlm_fops;
261 newls->ls_miscinfo.minor = MISC_DYNAMIC_MINOR;
263 status = misc_register(&newls->ls_miscinfo);
/* On failure roll back: release the lockspace and free the name. */
265 printk(KERN_ERR "dlm: misc register failed for %s\n", name);
266 dlm_release_lockspace(newls->ls_lockspace, 0);
267 kfree(newls->ls_miscinfo.name);
/* AUTOFREE lockspaces are destroyed when the last user closes. */
272 if (flags & DLM_USER_LSFLG_AUTOFREE)
273 set_bit(LS_FLAG_AUTOFREE, &newls->ls_flags);
275 add_lockspace_to_list(newls);
280 /* Called with the user_ls_lock mutex held */
/* Release the DLM lockspace, deregister its misc device and unlink it.
 * The struct is freed here only when nothing still references it;
 * otherwise the last closer frees it (see dlm_close). */
281 static int unregister_lockspace(struct user_ls *lsinfo, int force)
285 status = dlm_release_lockspace(lsinfo->ls_lockspace, force);
289 status = misc_deregister(&lsinfo->ls_miscinfo);
293 list_del(&lsinfo->ls_list);
/* Mark deleted so readers see EOF and writers are rejected. */
294 set_bit(LS_FLAG_DELETED, &lsinfo->ls_flags);
295 lsinfo->ls_lockspace = NULL;
296 if (atomic_read(&lsinfo->ls_refcnt) == 0) {
297 kfree(lsinfo->ls_miscinfo.name);
304 /* Add it to userland's AST queue */
305 static void add_to_astqueue(struct lock_info *li, void *astaddr, void *astparam,
308 struct ast_info *ast = kzalloc(sizeof(struct ast_info), GFP_KERNEL);
312 ast->result.user_astparam = astparam;
313 ast->result.user_astaddr = astaddr;
314 ast->result.user_lksb = li->li_user_lksb;
/* Snapshot the kernel lksb so the reader sees a consistent result even
 * if the lock state changes afterwards. */
315 memcpy(&ast->result.lksb, &li->li_lksb, sizeof(struct dlm_lksb));
316 ast->lvb_updated = lvb_updated;
318 spin_lock(&li->li_file->fi_ast_lock);
319 list_add_tail(&ast->list, &li->li_file->fi_ast_list);
320 spin_unlock(&li->li_file->fi_ast_lock);
/* Wake any process sleeping in dlm_read()/dlm_poll(). */
321 wake_up_interruptible(&li->li_file->fi_wait);
/* Kernel-side blocking AST callback: forward the event to userspace via
 * the per-file AST queue (only if a user bast handler is registered). */
324 static void bast_routine(void *param, int mode)
326 struct lock_info *li = param;
328 if (li && li->li_bastaddr)
329 add_to_astqueue(li, li->li_bastaddr, li->li_bastparam, 0);
333 * This is the kernel's AST routine.
334 * All lock, unlock & query operations complete here.
335 * The only synchronous ops are those done during device close.
337 static void ast_routine(void *param)
339 struct lock_info *li = param;
341 /* Param may be NULL if a persistent lock is unlocked by someone else */
345 /* If this is a successful conversion then activate the blocking ast
346 * args from the conversion request */
347 if (!test_bit(LI_FLAG_FIRSTLOCK, &li->li_flags) &&
348 li->li_lksb.sb_status == 0) {
350 li->li_bastparam = li->li_pend_bastparam;
351 li->li_bastaddr = li->li_pend_bastaddr;
352 li->li_pend_bastaddr = NULL;
355 /* If it's an async request then post data to the user's AST queue. */
356 if (li->li_castaddr) {
359 /* See if the lvb has been updated */
/* dlm_lvb_operations is indexed by (granted mode, requested mode);
 * +1 shifts mode -1 (no lock) to index 0. */
360 if (dlm_lvb_operations[li->li_grmode+1][li->li_rqmode+1] == 1)
/* A zero status means the request was granted: the requested mode
 * becomes the granted mode. */
363 if (li->li_lksb.sb_status == 0)
364 li->li_grmode = li->li_rqmode;
366 /* Only queue AST if the device is still open */
367 if (test_bit(1, &li->li_file->fi_flags))
368 add_to_astqueue(li, li->li_castaddr, li->li_castparam,
371 /* If it's a new lock operation that failed, then
372 * remove it from the owner queue and free the
375 if (test_and_clear_bit(LI_FLAG_FIRSTLOCK, &li->li_flags) &&
376 li->li_lksb.sb_status != 0) {
378 /* Wait till dlm_lock() has finished */
379 wait_for_completion(&li->li_firstcomp);
381 spin_lock(&li->li_file->fi_li_lock);
382 list_del(&li->li_ownerqueue);
383 clear_bit(LI_FLAG_ONLIST, &li->li_flags);
384 spin_unlock(&li->li_file->fi_li_lock);
385 release_lockinfo(li);
388 /* Free unlocks & queries */
389 if (li->li_lksb.sb_status == -DLM_EUNLOCK ||
390 li->li_cmd == DLM_USER_QUERY) {
391 release_lockinfo(li);
394 /* Synchronous request, just wake up the caller */
395 set_bit(LI_FLAG_COMPLETE, &li->li_flags);
396 wake_up_interruptible(&li->li_waitq);
401 * Wait for the lock op to complete and return the status.
/* Sleeps interruptibly until ast_routine sets LI_FLAG_COMPLETE; the
 * schedule() inside the loop is elided from this view. */
403 static int wait_for_ast(struct lock_info *li)
405 /* Wait for the AST routine to complete */
406 set_task_state(current, TASK_INTERRUPTIBLE);
407 while (!test_bit(LI_FLAG_COMPLETE, &li->li_flags))
410 set_task_state(current, TASK_RUNNING);
412 return li->li_lksb.sb_status;
416 /* Open on control device */
/* The control device carries no per-file state; a NULL private_data is
 * what dlm_write() uses to distinguish it from a lockspace device. */
417 static int dlm_ctl_open(struct inode *inode, struct file *file)
419 file->private_data = NULL;
423 /* Close on control device */
/* Nothing to tear down for the control device. */
424 static int dlm_ctl_close(struct inode *inode, struct file *file)
429 /* Open on lockspace device */
/* Allocate the per-open file_info and attach it to file->private_data. */
430 static int dlm_open(struct inode *inode, struct file *file)
433 struct user_ls *lsinfo;
435 lsinfo = find_lockspace(iminor(inode))
439 f = kzalloc(sizeof(struct file_info), GFP_KERNEL);
/* The open file pins the lockspace; reference dropped in dlm_close(). */
443 atomic_inc(&lsinfo->ls_refcnt);
444 INIT_LIST_HEAD(&f->fi_li_list);
445 INIT_LIST_HEAD(&f->fi_ast_list);
446 spin_lock_init(&f->fi_li_lock);
447 spin_lock_init(&f->fi_ast_lock);
448 init_waitqueue_head(&f->fi_wait);
/* Bit 1 = device open; cleared at close to suppress AST delivery. */
452 set_bit(1, &f->fi_flags);
454 file->private_data = f;
459 /* Check the user's version matches ours */
/* Mismatch when the major version differs, or the user's minor version
 * is newer than the kernel's.  NOTE(review): the inner "== MAJOR" test
 * is redundant — it is only reached when already true. */
460 static int check_version(struct dlm_write_request *req)
462 if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
463 (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
464 req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
466 printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
467 "user (%d.%d.%d) kernel (%d.%d.%d)\n",
473 DLM_DEVICE_VERSION_MAJOR,
474 DLM_DEVICE_VERSION_MINOR,
475 DLM_DEVICE_VERSION_PATCH);
481 /* Close on lockspace device */
/* Releases every lock still held by this file: persistent locks are
 * orphaned (kept but disowned), everything else is force-unlocked
 * synchronously.  Finally drops the file's lockspace reference. */
482 static int dlm_close(struct inode *inode, struct file *file)
484 struct file_info *f = file->private_data;
486 struct lock_info *old_li, *safe;
489 struct user_ls *lsinfo;
490 DECLARE_WAITQUEUE(wq, current);
492 lsinfo = find_lockspace(iminor(inode));
496 /* Mark this closed so that ASTs will not be delivered any more */
497 clear_bit(1, &f->fi_flags);
499 /* Block signals while we are doing this */
500 sigfillset(&allsigs);
501 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
503 /* We use our own lock_info struct here, so that any
504 * outstanding "real" ASTs will be delivered with the
505 * corresponding "real" params, thus freeing the lock_info
506 * that belongs the lock. This catches the corner case where
507 * a lock is BUSY when we try to unlock it here
509 memset(&li, 0, sizeof(li));
510 clear_bit(LI_FLAG_COMPLETE, &li.li_flags);
511 init_waitqueue_head(&li.li_waitq);
512 add_wait_queue(&li.li_waitq, &wq);
515 * Free any outstanding locks, they are on the
516 * list in LIFO order so there should be no problems
517 * about unlocking parents before children.
519 list_for_each_entry_safe(old_li, safe, &f->fi_li_list, li_ownerqueue) {
523 /* Don't unlock persistent locks, just mark them orphaned */
524 if (test_bit(LI_FLAG_PERSISTENT, &old_li->li_flags)) {
525 list_del(&old_li->li_ownerqueue);
527 /* Update master copy */
528 /* TODO: Check locking core updates the local and
529 remote ORPHAN flags */
530 li.li_lksb.sb_lkid = old_li->li_lksb.sb_lkid;
/* Null-mode convert with DLM_LKF_ORPHAN flags the lock as orphaned
 * on the master without releasing it. */
531 status = dlm_lock(f->fi_ls->ls_lockspace,
532 old_li->li_grmode, &li.li_lksb,
533 DLM_LKF_CONVERT|DLM_LKF_ORPHAN,
534 NULL, 0, 0, ast_routine, NULL, NULL);
536 printk("dlm: Error orphaning lock %x: %d\n",
537 old_li->li_lksb.sb_lkid, status);
539 /* But tidy our references in it */
540 release_lockinfo(old_li);
544 clear_bit(LI_FLAG_COMPLETE, &li.li_flags);
546 flags = DLM_LKF_FORCEUNLOCK;
/* Dying holder of PW/EX may have left the LVB inconsistent. */
547 if (old_li->li_grmode >= DLM_LOCK_PW)
548 flags |= DLM_LKF_IVVALBLK;
550 status = dlm_unlock(f->fi_ls->ls_lockspace,
551 old_li->li_lksb.sb_lkid, flags,
554 /* Must wait for it to complete as the next lock could be its
559 /* Unlock succeeded, free the lock_info struct. */
561 release_lockinfo(old_li);
564 remove_wait_queue(&li.li_waitq, &wq);
567 * If this is the last reference to the lockspace
568 * then free the struct. If it's an AUTOFREE lockspace
569 * then free the whole thing.
571 mutex_lock(&user_ls_lock);
572 if (atomic_dec_and_test(&lsinfo->ls_refcnt)) {
574 if (lsinfo->ls_lockspace) {
575 if (test_bit(LS_FLAG_AUTOFREE, &lsinfo->ls_flags)) {
576 unregister_lockspace(lsinfo, 1);
579 kfree(lsinfo->ls_miscinfo.name);
583 mutex_unlock(&user_ls_lock);
586 /* Restore signals */
587 sigprocmask(SIG_SETMASK, &tmpsig, NULL);
/* Handle DLM_USER_CREATE_LOCKSPACE from the control device.
 * Requires CAP_SYS_ADMIN; on success returns the new device's minor. */
593 static int do_user_create_lockspace(struct file_info *fi, uint8_t cmd,
594 struct dlm_lspace_params *kparams)
597 struct user_ls *lsinfo;
599 if (!capable(CAP_SYS_ADMIN))
602 status = register_lockspace(kparams->name, &lsinfo, kparams->flags);
604 /* If it succeeded then return the minor number */
606 status = lsinfo->ls_miscinfo.minor;
/* Handle DLM_USER_REMOVE_LOCKSPACE from the control device.
 * Requires CAP_SYS_ADMIN; FORCEFREE allows removal with locks held. */
611 static int do_user_remove_lockspace(struct file_info *fi, uint8_t cmd,
612 struct dlm_lspace_params *kparams)
616 struct user_ls *lsinfo;
618 if (!capable(CAP_SYS_ADMIN))
621 mutex_lock(&user_ls_lock);
622 lsinfo = __find_lockspace(kparams->minor);
624 mutex_unlock(&user_ls_lock);
628 if (kparams->flags & DLM_USER_LSFLG_FORCEFREE)
631 status = unregister_lockspace(lsinfo, force);
632 mutex_unlock(&user_ls_lock);
637 /* Read call, might block if no ASTs are waiting.
638 * It will only ever return one message at a time, regardless
639 * of how many are pending.
641 static ssize_t dlm_read(struct file *file, char __user *buffer, size_t count,
644 struct file_info *fi = file->private_data;
645 struct ast_info *ast;
648 DECLARE_WAITQUEUE(wait, current);
/* Caller's buffer must hold at least a dlm_lock_result header. */
650 if (count < sizeof(struct dlm_lock_result))
653 spin_lock(&fi->fi_ast_lock);
654 if (list_empty(&fi->fi_ast_list)) {
657 * Return EOF if the lockspace been deleted.
659 if (test_bit(LS_FLAG_DELETED, &fi->fi_ls->ls_flags))
662 if (file->f_flags & O_NONBLOCK) {
663 spin_unlock(&fi->fi_ast_lock);
/* Sleep until an AST is queued or a signal arrives; the spinlock is
 * dropped around the actual schedule. */
667 add_wait_queue(&fi->fi_wait, &wait);
670 set_current_state(TASK_INTERRUPTIBLE);
671 if (list_empty(&fi->fi_ast_list) &&
672 !signal_pending(current)) {
674 spin_unlock(&fi->fi_ast_lock);
676 spin_lock(&fi->fi_ast_lock);
680 current->state = TASK_RUNNING;
681 remove_wait_queue(&fi->fi_wait, &wait);
683 if (signal_pending(current)) {
684 spin_unlock(&fi->fi_ast_lock);
/* Dequeue exactly one AST. */
689 ast = list_entry(fi->fi_ast_list.next, struct ast_info, list);
690 list_del(&ast->list);
691 spin_unlock(&fi->fi_ast_lock);
693 /* Work out the size of the returned data */
694 data_size = sizeof(struct dlm_lock_result);
695 if (ast->lvb_updated && ast->result.lksb.sb_lvbptr)
696 data_size += DLM_USER_LVB_LEN;
698 offset = sizeof(struct dlm_lock_result);
700 /* Room for the extended data ? */
701 if (count >= data_size) {
703 if (ast->lvb_updated && ast->result.lksb.sb_lvbptr) {
704 if (copy_to_user(buffer+offset,
705 ast->result.lksb.sb_lvbptr,
708 ast->result.lvb_offset = offset;
709 offset += DLM_USER_LVB_LEN;
713 ast->result.length = data_size;
714 /* Copy the header now it has all the offsets in it */
715 if (copy_to_user(buffer, &ast->result, sizeof(struct dlm_lock_result)))
718 /* If we only returned a header and there's more to come then put it
/* ...back on the front of the queue so the next read gets the rest. */
720 if (count < data_size) {
721 spin_lock(&fi->fi_ast_lock);
722 list_add(&ast->list, &fi->fi_ast_list);
723 spin_unlock(&fi->fi_ast_lock);
/* poll()/select() support: readable whenever an AST is queued. */
729 static unsigned int dlm_poll(struct file *file, poll_table *wait)
731 struct file_info *fi = file->private_data;
733 poll_wait(file, &fi->fi_wait, wait);
735 spin_lock(&fi->fi_ast_lock);
736 if (!list_empty(&fi->fi_ast_list)) {
737 spin_unlock(&fi->fi_ast_lock);
738 return POLLIN | POLLRDNORM;
741 spin_unlock(&fi->fi_ast_lock);
/* Allocate and initialise a lock_info for a new request.
 * Takes a module reference, released later by release_lockinfo(). */
745 static struct lock_info *allocate_lockinfo(struct file_info *fi, uint8_t cmd,
746 struct dlm_lock_params *kparams)
748 struct lock_info *li;
750 if (!try_module_get(THIS_MODULE))
753 li = kzalloc(sizeof(struct lock_info), GFP_KERNEL);
755 li->li_magic = LOCKINFO_MAGIC;
/* Explicit NULLs are redundant after kzalloc but kept for clarity. */
761 li->li_pend_bastparam = NULL;
762 li->li_pend_bastaddr = NULL;
763 li->li_castaddr = NULL;
764 li->li_castparam = NULL;
765 li->li_lksb.sb_lvbptr = NULL;
766 li->li_bastaddr = kparams->bastaddr;
767 li->li_bastparam = kparams->bastparam;
/* Handle DLM_USER_LOCK: new lock requests, conversions, and adoption of
 * orphaned persistent locks.  Returns the lock ID on success (the user
 * needs it immediately), or a negative error. */
774 static int do_user_lock(struct file_info *fi, uint8_t cmd,
775 struct dlm_lock_params *kparams)
777 struct lock_info *li;
781 * Validate things that we need to have correct.
/* A completion AST address is mandatory for async requests. */
783 if (!kparams->castaddr)
789 /* Persistent child locks are not available yet */
790 if ((kparams->flags & DLM_LKF_PERSISTENT) && kparams->parent)
793 /* For conversions, there should already be a lockinfo struct,
794 unless we are adopting an orphaned persistent lock */
795 if (kparams->flags & DLM_LKF_CONVERT) {
797 li = get_lockinfo(kparams->lkid);
799 /* If this is a persistent lock we will have to create a
801 if (!li && (kparams->flags & DLM_LKF_PERSISTENT)) {
802 li = allocate_lockinfo(fi, cmd, kparams);
806 li->li_lksb.sb_lkid = kparams->lkid;
807 li->li_castaddr = kparams->castaddr;
808 li->li_castparam = kparams->castparam;
810 /* OK, this isn't exactly a FIRSTLOCK but it is the
811 first time we've used this lockinfo, and if things
812 fail we want rid of it */
813 init_completion(&li->li_firstcomp);
814 set_bit(LI_FLAG_FIRSTLOCK, &li->li_flags);
817 /* TODO: do a query to get the current state ?? */
/* Sanity-check that the lkid really maps to one of our lock_infos. */
822 if (li->li_magic != LOCKINFO_MAGIC)
825 /* For conversions don't overwrite the current blocking AST
827 a) if a blocking AST fires before the conversion is queued
828 it runs the current handler
829 b) if the conversion is cancelled, the original blocking AST
830 declaration is active
831 The pend_ info is made active when the conversion
834 li->li_pend_bastaddr = kparams->bastaddr;
835 li->li_pend_bastparam = kparams->bastparam;
/* Not a conversion: brand-new lock request. */
837 li = allocate_lockinfo(fi, cmd, kparams);
841 /* Allow us to complete our work before
842 the AST routine runs. In fact we only need (and use) this
843 when the initial lock fails */
844 init_completion(&li->li_firstcomp);
845 set_bit(LI_FLAG_FIRSTLOCK, &li->li_flags);
848 li->li_user_lksb = kparams->lksb;
849 li->li_castaddr = kparams->castaddr;
850 li->li_castparam = kparams->castparam;
851 li->li_lksb.sb_lkid = kparams->lkid;
852 li->li_rqmode = kparams->mode;
853 if (kparams->flags & DLM_LKF_PERSISTENT)
854 set_bit(LI_FLAG_PERSISTENT, &li->li_flags);
856 /* Copy in the value block */
857 if (kparams->flags & DLM_LKF_VALBLK) {
858 if (!li->li_lksb.sb_lvbptr) {
859 li->li_lksb.sb_lvbptr = kmalloc(DLM_USER_LVB_LEN,
861 if (!li->li_lksb.sb_lvbptr) {
867 memcpy(li->li_lksb.sb_lvbptr, kparams->lvb, DLM_USER_LVB_LEN);
/* Hand the request to the DLM core; completion arrives in ast_routine. */
871 status = dlm_lock(fi->fi_ls->ls_lockspace,
872 kparams->mode, &li->li_lksb,
874 kparams->name, kparams->namelen,
878 (li->li_pend_bastaddr || li->li_bastaddr) ?
879 bast_routine : NULL);
883 /* If it succeeded (this far) with a new lock then keep track of
884 it on the file's lockinfo list */
885 if (!status && test_bit(LI_FLAG_FIRSTLOCK, &li->li_flags)) {
887 spin_lock(&fi->fi_li_lock);
888 list_add(&li->li_ownerqueue, &fi->fi_li_list);
889 set_bit(LI_FLAG_ONLIST, &li->li_flags);
890 spin_unlock(&fi->fi_li_lock);
891 if (add_lockinfo(li))
892 printk(KERN_WARNING "Add lockinfo failed\n");
/* Let a pending ast_routine proceed now that setup is done. */
894 complete(&li->li_firstcomp);
897 /* Return the lockid as the user needs it /now/ */
898 return li->li_lksb.sb_lkid;
/* Error path: only free the lock_info if we created it here. */
901 if (test_bit(LI_FLAG_FIRSTLOCK, &li->li_flags))
902 release_lockinfo(li);
/* Handle DLM_USER_UNLOCK: unlock a lock or cancel a pending conversion.
 * May have to fabricate a lock_info when unlocking an orphaned lock. */
907 static int do_user_unlock(struct file_info *fi, uint8_t cmd,
908 struct dlm_lock_params *kparams)
910 struct lock_info *li;
912 int convert_cancel = 0;
914 li = get_lockinfo(kparams->lkid);
/* Unknown lkid (e.g. orphan adoption): create a fresh lock_info and
 * put it on this file's owner list. */
916 li = allocate_lockinfo(fi, cmd, kparams);
919 spin_lock(&fi->fi_li_lock);
920 list_add(&li->li_ownerqueue, &fi->fi_li_list);
921 set_bit(LI_FLAG_ONLIST, &li->li_flags);
922 spin_unlock(&fi->fi_li_lock);
925 if (li->li_magic != LOCKINFO_MAGIC)
928 li->li_user_lksb = kparams->lksb;
929 li->li_castparam = kparams->castparam;
932 /* Cancelling a conversion doesn't remove the lock...*/
933 if (kparams->flags & DLM_LKF_CANCEL && li->li_grmode != -1)
936 /* Wait until dlm_lock() has completed */
937 if (!test_bit(LI_FLAG_ONLIST, &li->li_flags)) {
938 wait_for_completion(&li->li_firstcomp);
941 /* dlm_unlock() passes a 0 for castaddr which means don't overwrite
942 the existing li_castaddr as that's the completion routine for
943 unlocks. dlm_unlock_wait() specifies a new AST routine to be
944 executed when the unlock completes. */
945 if (kparams->castaddr)
946 li->li_castaddr = kparams->castaddr;
948 /* Use existing lksb & astparams */
949 status = dlm_unlock(fi->fi_ls->ls_lockspace,
951 kparams->flags, &li->li_lksb, li);
/* A real unlock (not a conversion cancel) removes the lock from the
 * owner list; the AST will free the lock_info. */
953 if (!status && !convert_cancel) {
954 spin_lock(&fi->fi_li_lock);
955 list_del(&li->li_ownerqueue);
956 clear_bit(LI_FLAG_ONLIST, &li->li_flags);
957 spin_unlock(&fi->fi_li_lock);
963 /* Write call, submit a locking request */
/* Dispatches a dlm_write_request from userspace.  fi == NULL means the
 * write came in on the control device (lockspace ops only); non-NULL
 * means a lockspace device (lock/unlock only). */
964 static ssize_t dlm_write(struct file *file, const char __user *buffer,
965 size_t count, loff_t *ppos)
967 struct file_info *fi = file->private_data;
968 struct dlm_write_request *kparams;
973 /* -1 because lock name is optional */
974 if (count < sizeof(struct dlm_write_request)-1)
977 /* Has the lockspace been deleted */
978 if (fi && test_bit(LS_FLAG_DELETED, &fi->fi_ls->ls_flags))
/* Copy the whole variable-length request into kernel space. */
981 kparams = kmalloc(count, GFP_KERNEL);
986 /* Get the command info */
987 if (copy_from_user(kparams, buffer, count))
991 if (check_version(kparams))
994 /* Block signals while we are doing this */
995 sigfillset(&allsigs);
996 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
999 switch (kparams->cmd)
1002 if (!fi) goto out_sig;
1003 status = do_user_lock(fi, kparams->cmd, &kparams->i.lock);
1006 case DLM_USER_UNLOCK:
1007 if (!fi) goto out_sig;
1008 status = do_user_unlock(fi, kparams->cmd, &kparams->i.lock);
/* Lockspace management only makes sense on the control device. */
1011 case DLM_USER_CREATE_LOCKSPACE:
1012 if (fi) goto out_sig;
1013 status = do_user_create_lockspace(fi, kparams->cmd,
1014 &kparams->i.lspace);
1017 case DLM_USER_REMOVE_LOCKSPACE:
1018 if (fi) goto out_sig;
1019 status = do_user_remove_lockspace(fi, kparams->cmd,
1020 &kparams->i.lspace);
1023 printk("Unknown command passed to DLM device : %d\n",
1029 /* Restore signals */
1030 sigprocmask(SIG_SETMASK, &tmpsig, NULL);
1031 recalc_sigpending();
/* fops for per-lockspace devices (read/write/poll entries elided in
 * this view). */
1041 static struct file_operations _dlm_fops = {
1043 .release = dlm_close,
1047 .owner = THIS_MODULE,
/* fops for the "dlm-control" device: lockspace create/remove via write. */
1050 static struct file_operations _dlm_ctl_fops = {
1051 .open = dlm_ctl_open,
1052 .release = dlm_ctl_close,
1054 .owner = THIS_MODULE,
1058 * Create control device
/* Module init: set up global state and register the control device. */
1060 static int __init dlm_device_init(void)
1064 INIT_LIST_HEAD(&user_ls_list);
1065 mutex_init(&user_ls_lock);
1066 rwlock_init(&lockinfo_lock);
1068 ctl_device.name = "dlm-control";
1069 ctl_device.fops = &_dlm_ctl_fops;
1070 ctl_device.minor = MISC_DYNAMIC_MINOR;
1072 r = misc_register(&ctl_device);
1074 printk(KERN_ERR "dlm: misc_register failed for control dev\n");
/* Module exit: remove the control device. */
1081 static void __exit dlm_device_exit(void)
1083 misc_deregister(&ctl_device);
/* Standard module metadata and entry points. */
1086 MODULE_DESCRIPTION("Distributed Lock Manager device interface");
1087 MODULE_AUTHOR("Red Hat, Inc.");
1088 MODULE_LICENSE("GPL");
1090 module_init(dlm_device_init);
1091 module_exit(dlm_device_exit);