4 * Copyright (C) International Business Machines Corp., 2002,2007
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Common Internet FileSystem (CIFS) client
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
26 #include <linux/module.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
40 #define DECLARE_GLOBALS_HERE
42 #include "cifsproto.h"
43 #include "cifs_debug.h"
44 #include "cifs_fs_sb.h"
46 #include <linux/key-type.h>
47 #include "cifs_spnego.h"
48 #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
/*
 * Module-wide CIFS state and tunables.  The unsigned-int flags are
 * read elsewhere in the client to gate oplocks, Unix extensions,
 * lookup caching, multiuser mounts, and PDU signing; the module_param
 * values bound the request-buffer pools and in-flight request count.
 * NOTE(review): several lines (e.g. the #endif matching the
 * CONFIG_CIFS_QUOTA guard) are missing from this chunk -- confirm
 * against the complete file.
 */
50 #ifdef CONFIG_CIFS_QUOTA
51 static struct quotactl_ops cifs_quotactl_ops;
57 unsigned int oplockEnabled = 1;
58 unsigned int experimEnabled = 0;
59 unsigned int linuxExtEnabled = 1;
60 unsigned int lookupCacheEnabled = 1;
61 unsigned int multiuser_mount = 0;
62 unsigned int extended_security = CIFSSEC_DEF;
63 /* unsigned int ntlmv2_support = 0; */
64 unsigned int sign_CIFS_PDUs = 1;
/* extern-then-define pattern silences sparse's "no prototype" warning */
65 extern struct task_struct *oplockThread; /* remove sparse warning */
66 struct task_struct *oplockThread = NULL;
67 /* extern struct task_struct * dnotifyThread; remove sparse warning */
68 static struct task_struct *dnotifyThread = NULL;
69 static const struct super_operations cifs_super_ops;
70 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
71 module_param(CIFSMaxBufSize, int, 0);
72 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
73 "Default: 16384 Range: 8192 to 130048");
74 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
75 module_param(cifs_min_rcv, int, 0);
76 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
78 unsigned int cifs_min_small = 30;
79 module_param(cifs_min_small, int, 0);
80 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
82 unsigned int cifs_max_pending = CIFS_MAX_REQ;
83 module_param(cifs_max_pending, int, 0);
84 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
85 "Default: 50 Range: 2 to 256");
/* pools are defined later in this file; externs here avoid forward refs */
87 extern mempool_t *cifs_sm_req_poolp;
88 extern mempool_t *cifs_req_poolp;
89 extern mempool_t *cifs_mid_poolp;
91 extern struct kmem_cache *cifs_oplock_cachep;
/*
 * cifs_read_super - fill in a superblock at mount time.
 * Allocates the per-superblock cifs_sb_info, performs the actual mount
 * via cifs_mount(), then installs the magic number, super_operations,
 * block size, and the root inode/dentry.
 * NOTE(review): many lines (rc declaration, NULL checks, error labels)
 * are missing from this chunk -- the error paths below are partial.
 */
94 cifs_read_super(struct super_block *sb, void *data,
95 const char *devname, int silent)
98 struct cifs_sb_info *cifs_sb;
101 /* BB should we make this contingent on mount parm? */
102 sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
103 sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
104 cifs_sb = CIFS_SB(sb);
108 rc = cifs_mount(sb, cifs_sb, data, devname);
113 ("cifs_mount failed w/return code = %d", rc));
114 goto out_mount_failed;
117 sb->s_magic = CIFS_MAGIC_NUMBER;
118 sb->s_op = &cifs_super_ops;
119 /* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
121 cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
122 #ifdef CONFIG_CIFS_QUOTA
123 sb->s_qcop = &cifs_quotactl_ops;
125 sb->s_blocksize = CIFS_MAX_MSGSIZE;
126 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
127 inode = iget(sb, ROOT_I);
134 sb->s_root = d_alloc_root(inode);
141 #ifdef CONFIG_CIFS_EXPERIMENTAL
142 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
143 cFYI(1, ("export ops supported"));
144 sb->s_export_op = &cifs_export_ops;
146 #endif /* EXPERIMENTAL */
/* error path: root inode lookup failed; unwind nls reference */
151 cERROR(1, ("cifs_read_super: get root inode failed"));
157 if (cifs_sb->local_nls)
158 unload_nls(cifs_sb->local_nls);
/*
 * cifs_put_super - tear down a superblock at unmount.
 * Delegates to cifs_umount() and releases the NLS (charset) table.
 * NOTE(review): the kfree of cifs_sb and closing braces are missing
 * from this chunk.
 */
165 cifs_put_super(struct super_block *sb)
168 struct cifs_sb_info *cifs_sb;
170 cFYI(1, ("In cifs_put_super"));
171 cifs_sb = CIFS_SB(sb);
172 if (cifs_sb == NULL) {
173 cFYI(1, ("Empty cifs superblock info passed to unmount"));
176 rc = cifs_umount(sb, cifs_sb);
178 cERROR(1, ("cifs_umount failed with return code %d", rc));
180 unload_nls(cifs_sb->local_nls);
/*
 * cifs_statfs - report filesystem statistics for statfs(2).
 * Tries progressively older server queries: POSIX QFS info (if the
 * server advertises Unix extensions), then NT-level QFSInfo, then the
 * legacy level-one QFSInfo for old LANMAN-era servers.  Always returns
 * 0 so a stats failure does not fail the syscall.
 */
186 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
188 struct super_block *sb = dentry->d_sb;
190 int rc = -EOPNOTSUPP;
191 struct cifs_sb_info *cifs_sb;
192 struct cifsTconInfo *pTcon;
196 cifs_sb = CIFS_SB(sb);
197 pTcon = cifs_sb->tcon;
199 buf->f_type = CIFS_MAGIC_NUMBER;
201 /* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
202 buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
203 presumably be total path, but note
204 that some servers (includinng Samba 3)
205 have a shorter maximum path */
206 buf->f_files = 0; /* undefined */
207 buf->f_ffree = 0; /* unlimited */
209 /* BB we could add a second check for a QFS Unix capability bit */
210 /* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
211 if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
212 le64_to_cpu(pTcon->fsUnixInfo.Capability)))
213 rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);
215 /* Only need to call the old QFSInfo if failed
218 if (pTcon->ses->capabilities & CAP_NT_SMBS)
219 rc = CIFSSMBQFSInfo(xid, pTcon, buf); /* not supported by OS2 */
221 /* Some old Windows servers also do not support level 103, retry with
222 older level one if old server failed the previous call or we
223 bypassed it because we detected that this was an older LANMAN sess */
225 rc = SMBOldQFSInfo(xid, pTcon, buf);
229 /* BB get from info in tcon struct at mount time call to QFSAttrInfo */
231 return 0; /* always return success? what if volume is no
/*
 * cifs_permission - VFS permission check hook.
 * With CIFS_MOUNT_NO_PERM the client skips its own mode-bit check and
 * defers entirely to the server; otherwise fall through to the generic
 * client-side check.  NOTE(review): the "return 0" for the NO_PERM
 * branch is missing from this chunk.
 */
235 static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
237 struct cifs_sb_info *cifs_sb;
239 cifs_sb = CIFS_SB(inode->i_sb);
241 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
243 else /* file mode might have been restricted at mount time
244 on the client (above and beyond ACL on servers) for
245 servers which do not support setting and viewing mode bits,
246 so allowing client to check permissions is useful */
247 return generic_permission(inode, mask, NULL);
/* Slab caches and mempools backing inodes, request buffers (large and
   small), mids (multiplex ids), and oplock queue entries; created in
   the cifs_init_* routines below. */
250 static struct kmem_cache *cifs_inode_cachep;
251 static struct kmem_cache *cifs_req_cachep;
252 static struct kmem_cache *cifs_mid_cachep;
253 struct kmem_cache *cifs_oplock_cachep;
254 static struct kmem_cache *cifs_sm_req_cachep;
255 mempool_t *cifs_sm_req_poolp;
256 mempool_t *cifs_req_poolp;
257 mempool_t *cifs_mid_poolp;
/*
 * cifs_alloc_inode - allocate and initialize a cifsInodeInfo.
 * Caching flags start FALSE: until the file is opened and the server
 * grants an oplock we cannot cache data or metadata client-side.
 * NOTE(review): the NULL-check after kmem_cache_alloc is missing from
 * this chunk.
 */
259 static struct inode *
260 cifs_alloc_inode(struct super_block *sb)
262 struct cifsInodeInfo *cifs_inode;
263 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
266 cifs_inode->cifsAttrs = 0x20; /* default */
267 atomic_set(&cifs_inode->inUse, 0);
268 cifs_inode->time = 0;
269 /* Until the file is open and we have gotten oplock
270 info back from the server, can not assume caching of
271 file data or metadata */
272 cifs_inode->clientCanCacheRead = FALSE;
273 cifs_inode->clientCanCacheAll = FALSE;
274 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
276 /* Can not set i_flags here - they get immediately overwritten
277 to zero by the VFS */
278 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/
279 INIT_LIST_HEAD(&cifs_inode->openFileList);
280 return &cifs_inode->vfs_inode;
/* cifs_destroy_inode - return a cifsInodeInfo to its slab cache. */
284 cifs_destroy_inode(struct inode *inode)
286 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
290 * cifs_show_options() is for displaying mount options in /proc/mounts.
291 * Not all settable options are displayed but most of the important
/* Prints unc/username/domain, and uid/gid/posixpaths/rsize/wsize where
   applicable.  uid/gid are shown only when overridden at mount time or
   when the server lacks Unix extensions. */
295 cifs_show_options(struct seq_file *s, struct vfsmount *m)
297 struct cifs_sb_info *cifs_sb;
299 cifs_sb = CIFS_SB(m->mnt_sb);
303 /* BB add prepath to mount options displayed */
304 seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
305 if (cifs_sb->tcon->ses) {
306 if (cifs_sb->tcon->ses->userName)
307 seq_printf(s, ",username=%s",
308 cifs_sb->tcon->ses->userName);
309 if (cifs_sb->tcon->ses->domainName)
310 seq_printf(s, ",domain=%s",
311 cifs_sb->tcon->ses->domainName);
313 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
314 !(cifs_sb->tcon->unix_ext))
315 seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
316 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
317 !(cifs_sb->tcon->unix_ext))
318 seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
320 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
321 seq_printf(s, ",posixpaths");
322 seq_printf(s, ",rsize=%d", cifs_sb->rsize);
323 seq_printf(s, ",wsize=%d", cifs_sb->wsize);
328 #ifdef CONFIG_CIFS_QUOTA
/* cifs_xquota_set - XFS-style quota set hook; currently only logs the
   request (quota-over-CIFS is a stub).  NOTE(review): body is partial
   in this chunk. */
329 int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
330 struct fs_disk_quota *pdquota)
334 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
335 struct cifsTconInfo *pTcon;
338 pTcon = cifs_sb->tcon;
345 cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
/* cifs_xquota_get - XFS-style quota get hook; currently only logs the
   request (quota-over-CIFS is a stub).
   BUG FIX: the debug message said "set type" -- a copy/paste from
   cifs_xquota_set above -- making get and set indistinguishable in
   the debug log.  Corrected to "get type". */
354 int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
355 struct fs_disk_quota *pdquota)
359 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
360 struct cifsTconInfo *pTcon;
363 pTcon = cifs_sb->tcon;
369 cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
/* cifs_xstate_set - XFS-style quota state set hook; logs only (stub). */
378 int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
382 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
383 struct cifsTconInfo *pTcon;
386 pTcon = cifs_sb->tcon;
392 cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
/* cifs_xstate_get - XFS-style quota state query hook; logs only (stub). */
401 int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
405 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
406 struct cifsTconInfo *pTcon;
409 pTcon = cifs_sb->tcon;
415 cFYI(1, ("pqstats %p", qstats));
/* Quota operations table installed on the superblock (s_qcop).
   BUG FIX: .get_xquota previously pointed at cifs_xquota_set (copy/paste
   error), so quota *reads* invoked the *set* handler.  Wire it to
   cifs_xquota_get -- this matches the fix applied upstream in the
   kernel's cifsfs.c. */
424 static struct quotactl_ops cifs_quotactl_ops = {
425 .set_xquota = cifs_xquota_set,
426 .get_xquota = cifs_xquota_get,
427 .set_xstate = cifs_xstate_set,
428 .get_xstate = cifs_xstate_get,
/*
 * cifs_umount_begin - handle "umount -f" (MNT_FORCE).
 * Marks the tcon exiting when this is the last user, then wakes all
 * waiters on the server request/response queues (twice, with a yield
 * in between) so blocked requests can error out.
 */
432 static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
434 struct cifs_sb_info *cifs_sb;
435 struct cifsTconInfo *tcon;
437 if (!(flags & MNT_FORCE))
439 cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
443 tcon = cifs_sb->tcon;
446 down(&tcon->tconSem);
447 if (atomic_read(&tcon->useCount) == 1)
448 tcon->tidStatus = CifsExiting;
451 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
452 /* cancel_notify_requests(tcon); */
453 if (tcon->ses && tcon->ses->server) {
454 cFYI(1, ("wake up tasks now - umount begin not complete"));
455 wake_up_all(&tcon->ses->server->request_q);
456 wake_up_all(&tcon->ses->server->response_q);
457 msleep(1); /* yield */
458 /* we have to kick the requests once more */
459 wake_up_all(&tcon->ses->server->response_q);
462 /* BB FIXME - finish add checks for tidStatus BB */
467 #ifdef CONFIG_CIFS_STATS2
/* cifs_show_stats - per-mount stats for /proc; body not visible in
   this chunk. */
468 static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
/* cifs_remount - remount hook: only forces MS_NODIRATIME back on. */
475 static int cifs_remount(struct super_block *sb, int *flags, char *data)
477 *flags |= MS_NODIRATIME;
/* Superblock operations table installed by cifs_read_super(). */
481 static const struct super_operations cifs_super_ops = {
482 .read_inode = cifs_read_inode,
483 .put_super = cifs_put_super,
484 .statfs = cifs_statfs,
485 .alloc_inode = cifs_alloc_inode,
486 .destroy_inode = cifs_destroy_inode,
487 /* .drop_inode = generic_delete_inode,
488 .delete_inode = cifs_delete_inode, */ /* Do not need above two
489 functions unless later we add lazy close of inodes or unless the
490 kernel forgets to call us with the same number of releases (closes)
492 .show_options = cifs_show_options,
493 .umount_begin = cifs_umount_begin,
494 .remount_fs = cifs_remount,
495 #ifdef CONFIG_CIFS_STATS2
496 .show_stats = cifs_show_stats,
/*
 * cifs_get_sb - file_system_type get_sb hook.
 * Always allocates a fresh anonymous superblock (no sharing), fills it
 * via cifs_read_super(), and on failure releases it via
 * deactivate_super().  NOTE(review): error checks around sget/rc are
 * missing from this chunk.
 */
501 cifs_get_sb(struct file_system_type *fs_type,
502 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
505 struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
507 cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));
514 rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
516 up_write(&sb->s_umount);
517 deactivate_super(sb);
520 sb->s_flags |= MS_ACTIVE;
521 return simple_set_mnt(mnt, sb);
/*
 * cifs_file_aio_write - aio write wrapper.
 * Performs the generic write, then starts writeback immediately unless
 * the server granted an exclusive (write-caching) oplock.
 */
524 static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
525 unsigned long nr_segs, loff_t pos)
527 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
530 written = generic_file_aio_write(iocb, iov, nr_segs, pos);
531 if (!CIFS_I(inode)->clientCanCacheAll)
532 filemap_fdatawrite(inode->i_mapping);
/*
 * cifs_llseek - llseek hook.
 * SEEK_END must revalidate the cached file length (apps poll length by
 * seeking to end), so the inode's revalidate timestamp is zeroed first.
 */
536 static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
538 /* origin == SEEK_END => we must revalidate the cached file length */
539 if (origin == SEEK_END) {
542 /* some applications poll for the file length in this strange
543 way so we must seek to end on non-oplocked files by
544 setting the revalidate time to zero */
545 CIFS_I(file->f_path.dentry->d_inode)->time = 0;
547 retval = cifs_revalidate(file->f_path.dentry);
549 return (loff_t)retval;
551 return remote_llseek(file, offset, origin);
/* Registration record for the "cifs" filesystem type. */
554 static struct file_system_type cifs_fs_type = {
555 .owner = THIS_MODULE,
557 .get_sb = cifs_get_sb,
558 .kill_sb = kill_anon_super,
/* Inode operations for directories. */
561 const struct inode_operations cifs_dir_inode_ops = {
562 .create = cifs_create,
563 .lookup = cifs_lookup,
564 .getattr = cifs_getattr,
565 .unlink = cifs_unlink,
566 .link = cifs_hardlink,
569 .rename = cifs_rename,
570 .permission = cifs_permission,
571 /* revalidate:cifs_revalidate, */
572 .setattr = cifs_setattr,
573 .symlink = cifs_symlink,
575 #ifdef CONFIG_CIFS_XATTR
576 .setxattr = cifs_setxattr,
577 .getxattr = cifs_getxattr,
578 .listxattr = cifs_listxattr,
579 .removexattr = cifs_removexattr,
/* Inode operations for regular files. */
583 const struct inode_operations cifs_file_inode_ops = {
584 /* revalidate:cifs_revalidate, */
585 .setattr = cifs_setattr,
586 .getattr = cifs_getattr, /* do we need this anymore? */
587 .rename = cifs_rename,
588 .permission = cifs_permission,
589 #ifdef CONFIG_CIFS_XATTR
590 .setxattr = cifs_setxattr,
591 .getxattr = cifs_getxattr,
592 .listxattr = cifs_listxattr,
593 .removexattr = cifs_removexattr,
/* Inode operations for symbolic links. */
597 const struct inode_operations cifs_symlink_inode_ops = {
598 .readlink = generic_readlink,
599 .follow_link = cifs_follow_link,
600 .put_link = cifs_put_link,
601 .permission = cifs_permission,
602 /* BB add the following two eventually */
603 /* revalidate: cifs_revalidate,
604 setattr: cifs_notify_change, *//* BB do we need notify change */
605 #ifdef CONFIG_CIFS_XATTR
606 .setxattr = cifs_setxattr,
607 .getxattr = cifs_getxattr,
608 .listxattr = cifs_listxattr,
609 .removexattr = cifs_removexattr,
/* File operations: default (page cache, brlock-capable) variant. */
613 const struct file_operations cifs_file_ops = {
614 .read = do_sync_read,
615 .write = do_sync_write,
616 .aio_read = generic_file_aio_read,
617 .aio_write = cifs_file_aio_write,
619 .release = cifs_close,
623 .mmap = cifs_file_mmap,
624 .splice_read = generic_file_splice_read,
625 .llseek = cifs_llseek,
626 #ifdef CONFIG_CIFS_POSIX
628 #endif /* CONFIG_CIFS_POSIX */
630 #ifdef CONFIG_CIFS_EXPERIMENTAL
631 .dir_notify = cifs_dir_notify,
632 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* File operations: direct-I/O variant (bypasses page cache). */
635 const struct file_operations cifs_file_direct_ops = {
636 /* no mmap, no aio, no readv -
637 BB reevaluate whether they can be done with directio, no cache */
638 .read = cifs_user_read,
639 .write = cifs_user_write,
641 .release = cifs_close,
645 .splice_read = generic_file_splice_read,
646 #ifdef CONFIG_CIFS_POSIX
648 #endif /* CONFIG_CIFS_POSIX */
649 .llseek = cifs_llseek,
650 #ifdef CONFIG_CIFS_EXPERIMENTAL
651 .dir_notify = cifs_dir_notify,
652 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* File operations: variant for mounts without byte-range locking. */
654 const struct file_operations cifs_file_nobrl_ops = {
655 .read = do_sync_read,
656 .write = do_sync_write,
657 .aio_read = generic_file_aio_read,
658 .aio_write = cifs_file_aio_write,
660 .release = cifs_close,
663 .mmap = cifs_file_mmap,
664 .splice_read = generic_file_splice_read,
665 .llseek = cifs_llseek,
666 #ifdef CONFIG_CIFS_POSIX
668 #endif /* CONFIG_CIFS_POSIX */
670 #ifdef CONFIG_CIFS_EXPERIMENTAL
671 .dir_notify = cifs_dir_notify,
672 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* File operations: direct-I/O variant without byte-range locking. */
675 const struct file_operations cifs_file_direct_nobrl_ops = {
676 /* no mmap, no aio, no readv -
677 BB reevaluate whether they can be done with directio, no cache */
678 .read = cifs_user_read,
679 .write = cifs_user_write,
681 .release = cifs_close,
684 .splice_read = generic_file_splice_read,
685 #ifdef CONFIG_CIFS_POSIX
687 #endif /* CONFIG_CIFS_POSIX */
688 .llseek = cifs_llseek,
689 #ifdef CONFIG_CIFS_EXPERIMENTAL
690 .dir_notify = cifs_dir_notify,
691 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* File operations for open directories (readdir etc.). */
694 const struct file_operations cifs_dir_ops = {
695 .readdir = cifs_readdir,
696 .release = cifs_closedir,
697 .read = generic_read_dir,
698 #ifdef CONFIG_CIFS_EXPERIMENTAL
699 .dir_notify = cifs_dir_notify,
700 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* cifs_init_once - slab constructor for cifsInodeInfo objects:
   initializes the embedded VFS inode and the per-inode lock list. */
705 cifs_init_once(struct kmem_cache *cachep, void *inode)
707 struct cifsInodeInfo *cifsi = inode;
709 inode_init_once(&cifsi->vfs_inode);
710 INIT_LIST_HEAD(&cifsi->lockList);
/* cifs_init_inodecache - create the cifsInodeInfo slab cache;
   returns an error when creation fails (return lines not visible). */
714 cifs_init_inodecache(void)
716 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
717 sizeof(struct cifsInodeInfo),
718 0, (SLAB_RECLAIM_ACCOUNT|
721 if (cifs_inode_cachep == NULL)
/* cifs_destroy_inodecache - tear down the inode slab cache. */
728 cifs_destroy_inodecache(void)
730 kmem_cache_destroy(cifs_inode_cachep)
/*
 * cifs_init_request_bufs - create the large and small SMB request
 * buffer slab caches and their mempools.  Clamps module parameters to
 * sane ranges first (CIFSMaxBufSize 8192..130048 rounded to a 512-byte
 * multiple; cifs_min_rcv <= 64; cifs_min_small 2..256).  Later
 * allocations unwind earlier ones on failure.
 */
734 cifs_init_request_bufs(void)
736 if (CIFSMaxBufSize < 8192) {
737 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
738 Unicode path name has to fit in any SMB/CIFS path based frames */
739 CIFSMaxBufSize = 8192;
740 } else if (CIFSMaxBufSize > 1024*127) {
741 CIFSMaxBufSize = 1024 * 127;
743 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
745 /* cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
746 cifs_req_cachep = kmem_cache_create("cifs_request",
748 MAX_CIFS_HDR_SIZE, 0,
749 SLAB_HWCACHE_ALIGN, NULL);
750 if (cifs_req_cachep == NULL)
753 if (cifs_min_rcv < 1)
755 else if (cifs_min_rcv > 64) {
757 cERROR(1, ("cifs_min_rcv set to maximum (64)"));
760 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
763 if (cifs_req_poolp == NULL) {
764 kmem_cache_destroy(cifs_req_cachep);
767 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
768 almost all handle based requests (but not write response, nor is it
769 sufficient for path based requests). A smaller size would have
770 been more efficient (compacting multiple slab items on one 4k page)
771 for the case in which debug was on, but this larger size allows
772 more SMBs to use small buffer alloc and is still much more
773 efficient to alloc 1 per page off the slab compared to 17K (5page)
774 alloc of large cifs buffers even when page debugging is on */
775 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
776 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
778 if (cifs_sm_req_cachep == NULL) {
779 mempool_destroy(cifs_req_poolp);
780 kmem_cache_destroy(cifs_req_cachep);
784 if (cifs_min_small < 2)
786 else if (cifs_min_small > 256) {
787 cifs_min_small = 256;
788 cFYI(1, ("cifs_min_small set to maximum (256)"));
791 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
794 if (cifs_sm_req_poolp == NULL) {
795 mempool_destroy(cifs_req_poolp);
796 kmem_cache_destroy(cifs_req_cachep);
797 kmem_cache_destroy(cifs_sm_req_cachep);
/* cifs_destroy_request_bufs - release both request mempools and their
   backing slab caches (reverse order of creation). */
805 cifs_destroy_request_bufs(void)
807 mempool_destroy(cifs_req_poolp);
808 kmem_cache_destroy(cifs_req_cachep);
809 mempool_destroy(cifs_sm_req_poolp);
810 kmem_cache_destroy(cifs_sm_req_cachep);
/* Body of cifs_init_mids (signature not visible in this chunk):
   creates the mid (multiplex id) slab cache + mempool and the oplock
   queue-entry cache, unwinding on each failure. */
816 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
817 sizeof(struct mid_q_entry), 0,
818 SLAB_HWCACHE_ALIGN, NULL);
819 if (cifs_mid_cachep == NULL)
822 /* 3 is a reasonable minimum number of simultaneous operations */
823 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
824 if (cifs_mid_poolp == NULL) {
825 kmem_cache_destroy(cifs_mid_cachep);
829 cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
830 sizeof(struct oplock_q_entry), 0,
831 SLAB_HWCACHE_ALIGN, NULL);
832 if (cifs_oplock_cachep == NULL) {
833 mempool_destroy(cifs_mid_poolp);
834 kmem_cache_destroy(cifs_mid_cachep);
/* cifs_destroy_mids - release the mid mempool and the mid/oplock
   slab caches. */
842 cifs_destroy_mids(void)
844 mempool_destroy(cifs_mid_poolp);
845 kmem_cache_destroy(cifs_mid_cachep);
846 kmem_cache_destroy(cifs_oplock_cachep);
/*
 * cifs_oplock_thread - kernel thread servicing queued oplock breaks.
 * Loops until kthread_should_stop(): sleeps when the global oplock
 * queue is empty; otherwise dequeues an entry under GlobalMid_Lock,
 * flushes (and, when read caching was lost, waits for and invalidates)
 * the inode's page cache, then sends the oplock release lock request
 * unless the session needs reconnect (the server already dropped the
 * oplock in that case).
 */
849 static int cifs_oplock_thread(void *dummyarg)
851 struct oplock_q_entry *oplock_item;
852 struct cifsTconInfo *pTcon;
862 spin_lock(&GlobalMid_Lock);
863 if (list_empty(&GlobalOplock_Q)) {
864 spin_unlock(&GlobalMid_Lock);
865 set_current_state(TASK_INTERRUPTIBLE);
866 schedule_timeout(39*HZ);
868 oplock_item = list_entry(GlobalOplock_Q.next,
869 struct oplock_q_entry, qhead);
871 cFYI(1, ("found oplock item to write out"));
872 pTcon = oplock_item->tcon;
873 inode = oplock_item->pinode;
874 netfid = oplock_item->netfid;
875 spin_unlock(&GlobalMid_Lock);
876 DeleteOplockQEntry(oplock_item);
877 /* can not grab inode sem here since it would
878 deadlock when oplock received on delete
879 since vfs_unlink holds the i_mutex across
881 /* mutex_lock(&inode->i_mutex);*/
882 if (S_ISREG(inode->i_mode)) {
884 filemap_fdatawrite(inode->i_mapping);
885 if (CIFS_I(inode)->clientCanCacheRead
887 filemap_fdatawait(inode->i_mapping);
888 invalidate_remote_inode(inode);
892 /* mutex_unlock(&inode->i_mutex);*/
894 CIFS_I(inode)->write_behind_rc = rc;
895 cFYI(1, ("Oplock flush inode %p rc %d",
898 /* releasing stale oplock after recent reconnect
899 of smb session using a now incorrect file
900 handle is not a data integrity issue but do
901 not bother sending an oplock release if session
902 to server still is disconnected since oplock
903 already released by the server in that case */
904 if (pTcon->tidStatus != CifsNeedReconnect) {
905 rc = CIFSSMBLock(0, pTcon, netfid,
906 0 /* len */ , 0 /* offset */, 0,
907 0, LOCKING_ANDX_OPLOCK_RELEASE,
909 cFYI(1, ("Oplock release rc = %d", rc));
912 spin_unlock(&GlobalMid_Lock);
913 set_current_state(TASK_INTERRUPTIBLE);
914 schedule_timeout(1); /* yield in case q were corrupt */
916 } while (!kthread_should_stop());
/*
 * cifs_dnotify_thread - kernel thread that periodically (every 15s)
 * wakes all waiters on each session's response queue so requests stuck
 * behind a dead server can notice and error out.
 */
921 static int cifs_dnotify_thread(void *dummyarg)
923 struct list_head *tmp;
924 struct cifsSesInfo *ses;
929 set_current_state(TASK_INTERRUPTIBLE);
930 schedule_timeout(15*HZ);
931 read_lock(&GlobalSMBSeslock);
932 /* check if any stuck requests that need
933 to be woken up and wakeq so the
934 thread can wake up and error out */
935 list_for_each(tmp, &GlobalSMBSessionList) {
936 ses = list_entry(tmp, struct cifsSesInfo,
938 if (ses && ses->server &&
939 atomic_read(&ses->server->inFlight))
940 wake_up_all(&ses->server->response_q);
942 read_unlock(&GlobalSMBSeslock);
943 } while (!kthread_should_stop());
/*
 * Module init (init_cifs; signature above this chunk's first visible
 * line): initializes global lists, counters and locks, clamps
 * cifs_max_pending to 2..256, then builds caches, registers the
 * filesystem (and the spnego key type under CONFIG_CIFS_UPCALL), and
 * starts the oplock and dnotify kthreads.  Uses goto-based unwinding
 * so each failure releases everything set up before it.
 */
952 #ifdef CONFIG_PROC_FS
955 /* INIT_LIST_HEAD(&GlobalServerList);*/ /* BB not implemented yet */
956 INIT_LIST_HEAD(&GlobalSMBSessionList);
957 INIT_LIST_HEAD(&GlobalTreeConnectionList);
958 INIT_LIST_HEAD(&GlobalOplock_Q);
959 #ifdef CONFIG_CIFS_EXPERIMENTAL
960 INIT_LIST_HEAD(&GlobalDnotifyReqList);
961 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
964 * Initialize Global counters
966 atomic_set(&sesInfoAllocCount, 0);
967 atomic_set(&tconInfoAllocCount, 0);
968 atomic_set(&tcpSesAllocCount, 0);
969 atomic_set(&tcpSesReconnectCount, 0);
970 atomic_set(&tconInfoReconnectCount, 0);
972 atomic_set(&bufAllocCount, 0);
973 atomic_set(&smBufAllocCount, 0);
974 #ifdef CONFIG_CIFS_STATS2
975 atomic_set(&totBufAllocCount, 0);
976 atomic_set(&totSmBufAllocCount, 0);
977 #endif /* CONFIG_CIFS_STATS2 */
979 atomic_set(&midCount, 0);
980 GlobalCurrentXid = 0;
981 GlobalTotalActiveXid = 0;
982 GlobalMaxActiveXid = 0;
983 memset(Local_System_Name, 0, 15);
984 rwlock_init(&GlobalSMBSeslock);
985 spin_lock_init(&GlobalMid_Lock);
987 if (cifs_max_pending < 2) {
988 cifs_max_pending = 2;
989 cFYI(1, ("cifs_max_pending set to min of 2"));
990 } else if (cifs_max_pending > 256) {
991 cifs_max_pending = 256;
992 cFYI(1, ("cifs_max_pending set to max of 256"));
995 rc = cifs_init_inodecache();
999 rc = cifs_init_mids();
1001 goto out_destroy_inodecache;
1003 rc = cifs_init_request_bufs();
1005 goto out_destroy_mids;
1007 rc = register_filesystem(&cifs_fs_type);
1009 goto out_destroy_request_bufs;
1010 #ifdef CONFIG_CIFS_UPCALL
1011 rc = register_key_type(&cifs_spnego_key_type);
1013 goto out_unregister_filesystem;
1015 oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
1016 if (IS_ERR(oplockThread)) {
1017 rc = PTR_ERR(oplockThread);
1018 cERROR(1, ("error %d create oplock thread", rc));
1019 goto out_unregister_key_type;
1022 dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
1023 if (IS_ERR(dnotifyThread)) {
1024 rc = PTR_ERR(dnotifyThread);
1025 cERROR(1, ("error %d create dnotify thread", rc));
1026 goto out_stop_oplock_thread;
/* failure unwinding, in reverse order of setup */
1031 out_stop_oplock_thread:
1032 kthread_stop(oplockThread);
1033 out_unregister_key_type:
1034 #ifdef CONFIG_CIFS_UPCALL
1035 unregister_key_type(&cifs_spnego_key_type);
1036 out_unregister_filesystem:
1038 unregister_filesystem(&cifs_fs_type);
1039 out_destroy_request_bufs:
1040 cifs_destroy_request_bufs();
1042 cifs_destroy_mids();
1043 out_destroy_inodecache:
1044 cifs_destroy_inodecache();
1046 #ifdef CONFIG_PROC_FS
/* Module exit (exit_cifs; signature above this chunk's first visible
   line): unregisters the key type and filesystem, destroys caches,
   and stops both kthreads -- reverse order of init_cifs. */
1055 cFYI(0, ("exit_cifs"));
1056 #ifdef CONFIG_PROC_FS
1059 #ifdef CONFIG_CIFS_UPCALL
1060 unregister_key_type(&cifs_spnego_key_type);
1062 unregister_filesystem(&cifs_fs_type);
1063 cifs_destroy_inodecache();
1064 cifs_destroy_mids();
1065 cifs_destroy_request_bufs();
1066 kthread_stop(oplockThread);
1067 kthread_stop(dnotifyThread);
/* Module metadata and entry points. */
1070 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1071 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1073 ("VFS to access servers complying with the SNIA CIFS Specification "
1074 "e.g. Samba and Windows");
1075 MODULE_VERSION(CIFS_VERSION);
1076 module_init(init_cifs)
1077 module_exit(exit_cifs)