X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=fs%2Finotify.c;h=a37e9fb1da589fa1457929e2079e176220ad34c0;hb=7ea6040b0eff07d3a9a4e2d248ac137c6ad02d42;hp=e423bfe0c86f9449db088053d935dd6b98a02c64;hpb=1b3035b7fcc72b6b36f2a3634dad832eb2453ce8;p=linux-2.6

diff --git a/fs/inotify.c b/fs/inotify.c
index e423bfe0c8..a37e9fb1da 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -29,8 +29,6 @@
 #include
 #include
 #include
-#include
-#include
 #include
 #include
 #include
@@ -39,14 +37,15 @@
 #include
 
 static atomic_t inotify_cookie;
+static atomic_t inotify_watches;
 
 static kmem_cache_t *watch_cachep;
 static kmem_cache_t *event_cachep;
 
 static struct vfsmount *inotify_mnt;
 
-/* These are configurable via /proc/sys/inotify */
-int inotify_max_user_devices;
+/* these are configurable via /proc/sys/fs/inotify/ */
+int inotify_max_user_instances;
 int inotify_max_user_watches;
 int inotify_max_queued_events;
 
@@ -64,8 +63,8 @@ int inotify_max_queued_events;
  * Lifetimes of the three main data structures--inotify_device, inode, and
  * inotify_watch--are managed by reference count.
  *
- * inotify_device: Lifetime is from open until release. Additional references
- * can bump the count via get_inotify_dev() and drop the count via
+ * inotify_device: Lifetime is from inotify_init() until release. Additional
+ * references can bump the count via get_inotify_dev() and drop the count via
  * put_inotify_dev().
  *
  * inotify_watch: Lifetime is from create_watch() to destory_watch().
@@ -77,7 +76,7 @@ int inotify_max_queued_events;
  */
 
 /*
- * struct inotify_device - represents an open instance of an inotify device
+ * struct inotify_device - represents an inotify instance
  *
  * This structure is protected by the semaphore 'sem'.
  */
@@ -92,6 +91,7 @@ struct inotify_device {
 	unsigned int		queue_size;	/* size of the queue (bytes) */
 	unsigned int		event_count;	/* number of pending events */
 	unsigned int		max_events;	/* maximum number of events */
+	u32			last_wd;	/* the last wd allocated */
 };
 
 /*
@@ -125,6 +125,47 @@ struct inotify_watch {
 	u32			mask;		/* event mask for this watch */
 };
 
+#ifdef CONFIG_SYSCTL
+
+#include
+
+static int zero;
+
+ctl_table inotify_table[] = {
+	{
+		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
+		.procname	= "max_user_instances",
+		.data		= &inotify_max_user_instances,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &zero,
+	},
+	{
+		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
+		.procname	= "max_user_watches",
+		.data		= &inotify_max_user_watches,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &zero,
+	},
+	{
+		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
+		.procname	= "max_queued_events",
+		.data		= &inotify_max_queued_events,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &zero
+	},
+	{ .ctl_name = 0 }
+};
+#endif /* CONFIG_SYSCTL */
+
 static inline void get_inotify_dev(struct inotify_device *dev)
 {
 	atomic_inc(&dev->count);
@@ -313,7 +354,7 @@ static int inotify_dev_get_wd(struct inotify_device *dev,
 	do {
 		if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL)))
 			return -ENOSPC;
-		ret = idr_get_new(&dev->idr, watch, &watch->wd);
+		ret = idr_get_new_above(&dev->idr, watch, dev->last_wd+1, &watch->wd);
 	} while (ret == -EAGAIN);
 
 	return ret;
@@ -332,7 +373,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd)
 	/* you can only watch an inode if you have read permissions on it */
 	error = permission(nd->dentry->d_inode, MAY_READ, NULL);
 	if (error)
-		path_release (nd);
+		path_release(nd);
 	return error;
 }
 
@@ -348,7 +389,8 @@ static struct inotify_watch *create_watch(struct inotify_device *dev,
 	struct inotify_watch *watch;
 	int ret;
 
-	if (atomic_read(&dev->user->inotify_watches) >= inotify_max_user_watches)
+	if (atomic_read(&dev->user->inotify_watches) >=
+			inotify_max_user_watches)
 		return ERR_PTR(-ENOSPC);
 
 	watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
@@ -361,6 +403,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev,
 		return ERR_PTR(ret);
 	}
 
+	dev->last_wd = watch->wd;
 	watch->mask = mask;
 	atomic_set(&watch->count, 0);
 	INIT_LIST_HEAD(&watch->d_list);
@@ -380,6 +423,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev,
 	get_inotify_watch(watch);
 
 	atomic_inc(&dev->user->inotify_watches);
+	atomic_inc(&inotify_watches);
 
 	return watch;
 }
@@ -412,6 +456,7 @@ static void remove_watch_no_event(struct inotify_watch *watch,
 	list_del(&watch->d_list);
 
 	atomic_dec(&dev->user->inotify_watches);
+	atomic_dec(&inotify_watches);
 	idr_remove(&dev->idr, watch->wd);
 	put_inotify_watch(watch);
 }
@@ -490,6 +535,9 @@ void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
 	struct dentry *parent;
 	struct inode *inode;
 
+	if (!atomic_read (&inotify_watches))
+		return;
+
 	spin_lock(&dentry->d_lock);
 	parent = dentry->d_parent;
 	inode = parent->d_inode;
@@ -744,15 +792,14 @@ static int inotify_release(struct inode *ignored, struct file *file)
 		inotify_dev_event_dequeue(dev);
 	up(&dev->sem);
 
-	/* free this device: the put matching the get in inotify_open() */
+	/* free this device: the put matching the get in inotify_init() */
 	put_inotify_dev(dev);
 
 	return 0;
 }
 
 /*
- * inotify_ignore - handle the INOTIFY_IGNORE ioctl, asking that a given wd be
- * removed from the device.
+ * inotify_ignore - remove a given wd from this inotify instance.
  *
  * Can sleep.
  */
@@ -817,42 +864,40 @@ asmlinkage long sys_inotify_init(void)
 {
 	struct inotify_device *dev;
 	struct user_struct *user;
-	int ret = -ENOTTY;
-	int fd;
-	struct file *filp;
+	struct file *filp;
+	int fd, ret;
 
 	fd = get_unused_fd();
-	if (fd < 0) {
-		ret = fd;
-		goto out;
-	}
+	if (fd < 0)
		return fd;
 
 	filp = get_empty_filp();
 	if (!filp) {
-		put_unused_fd(fd);
 		ret = -ENFILE;
-		goto out;
+		goto out_put_fd;
 	}
-	filp->f_op = &inotify_fops;
-	filp->f_vfsmnt = mntget(inotify_mnt);
-	filp->f_dentry = dget(inotify_mnt->mnt_root);
-	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
-	filp->f_mode = FMODE_READ;
-	filp->f_flags = O_RDONLY;
 
 	user = get_uid(current->user);
-
-	if (unlikely(atomic_read(&user->inotify_devs) >= inotify_max_user_devices)) {
+	if (unlikely(atomic_read(&user->inotify_devs) >=
+			inotify_max_user_instances)) {
 		ret = -EMFILE;
-		goto out_err;
+		goto out_free_uid;
 	}
 
 	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
 	if (unlikely(!dev)) {
 		ret = -ENOMEM;
-		goto out_err;
+		goto out_free_uid;
 	}
 
+	filp->f_op = &inotify_fops;
+	filp->f_vfsmnt = mntget(inotify_mnt);
+	filp->f_dentry = dget(inotify_mnt->mnt_root);
+	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
+	filp->f_mode = FMODE_READ;
+	filp->f_flags = O_RDONLY;
+	filp->private_data = dev;
+
 	idr_init(&dev->idr);
 	INIT_LIST_HEAD(&dev->events);
 	INIT_LIST_HEAD(&dev->watches);
@@ -862,50 +907,59 @@ asmlinkage long sys_inotify_init(void)
 	dev->queue_size = 0;
 	dev->max_events = inotify_max_queued_events;
 	dev->user = user;
+	dev->last_wd = 0;
 	atomic_set(&dev->count, 0);
 
 	get_inotify_dev(dev);
 	atomic_inc(&user->inotify_devs);
+	fd_install(fd, filp);
 
-	filp->private_data = dev;
-	fd_install (fd, filp);
 	return fd;
-out_err:
-	put_unused_fd (fd);
-	put_filp (filp);
+out_free_uid:
 	free_uid(user);
-out:
+	put_filp(filp);
+out_put_fd:
+	put_unused_fd(fd);
 	return ret;
 }
 
-asmlinkage long sys_inotify_add_watch(int fd, const char *path, u32 mask)
+asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
 {
 	struct inotify_watch *watch, *old;
 	struct inode *inode;
 	struct inotify_device *dev;
 	struct nameidata nd;
 	struct file *filp;
-	int ret;
+	int ret, fput_needed;
+	int mask_add = 0;
 
-	filp = fget(fd);
-	if (!filp)
+	filp = fget_light(fd, &fput_needed);
+	if (unlikely(!filp))
 		return -EBADF;
 
-	dev = filp->private_data;
+	/* verify that this is indeed an inotify instance */
+	if (unlikely(filp->f_op != &inotify_fops)) {
+		ret = -EINVAL;
+		goto fput_and_out;
+	}
 
-	ret = find_inode ((const char __user*)path, &nd);
-	if (ret)
+	ret = find_inode(path, &nd);
+	if (unlikely(ret))
 		goto fput_and_out;
 
-	/* Held in place by reference in nd */
+	/* inode held in place by reference to nd; dev by fget on fd */
 	inode = nd.dentry->d_inode;
+	dev = filp->private_data;
 
 	down(&inode->inotify_sem);
 	down(&dev->sem);
 
+	if (mask & IN_MASK_ADD)
+		mask_add = 1;
+
 	/* don't let user-space set invalid bits: we don't want flags set */
 	mask &= IN_ALL_EVENTS;
-	if (!mask) {
+	if (unlikely(!mask)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -916,7 +970,10 @@ asmlinkage long sys_inotify_add_watch(int fd, const char *path, u32 mask)
 	 */
 	old = inode_find_dev(inode, dev);
 	if (unlikely(old)) {
-		old->mask = mask;
+		if (mask_add)
+			old->mask |= mask;
+		else
+			old->mask = mask;
 		ret = old->wd;
 		goto out;
 	}
@@ -932,11 +989,11 @@ asmlinkage long sys_inotify_add_watch(int fd, const char *path, u32 mask)
 	list_add(&watch->i_list, &inode->inotify_watches);
 	ret = watch->wd;
 out:
-	path_release (&nd);
 	up(&dev->sem);
 	up(&inode->inotify_sem);
+	path_release(&nd);
 fput_and_out:
-	fput(filp);
+	fput_light(filp, fput_needed);
 	return ret;
 }
 
@@ -944,14 +1001,23 @@ asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
 {
 	struct file *filp;
 	struct inotify_device *dev;
-	int ret;
+	int ret, fput_needed;
 
-	filp = fget(fd);
-	if (!filp)
+	filp = fget_light(fd, &fput_needed);
+	if (unlikely(!filp))
 		return -EBADF;
+
+	/* verify that this is indeed an inotify instance */
+	if (unlikely(filp->f_op != &inotify_fops)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	dev = filp->private_data;
-	ret = inotify_ignore (dev, wd);
-	fput(filp);
+	ret = inotify_ignore(dev, wd);
+
+out:
+	fput_light(filp, fput_needed);
 
 	return ret;
 }
@@ -969,20 +1035,28 @@ static struct file_system_type inotify_fs_type = {
 };
 
 /*
- * inotify_init - Our initialization function.  Note that we cannnot return
+ * inotify_setup - Our initialization function.  Note that we cannnot return
  * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
-static int __init inotify_init(void)
+static int __init inotify_setup(void)
 {
-	register_filesystem(&inotify_fs_type);
+	int ret;
+
+	ret = register_filesystem(&inotify_fs_type);
+	if (unlikely(ret))
+		panic("inotify: register_filesystem returned %d!\n", ret);
+
 	inotify_mnt = kern_mount(&inotify_fs_type);
+	if (IS_ERR(inotify_mnt))
+		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));
 
-	inotify_max_queued_events = 8192;
-	inotify_max_user_devices = 128;
+	inotify_max_queued_events = 16384;
+	inotify_max_user_instances = 128;
 	inotify_max_user_watches = 8192;
 
 	atomic_set(&inotify_cookie, 0);
+	atomic_set(&inotify_watches, 0);
 
 	watch_cachep = kmem_cache_create("inotify_watch_cache",
 					 sizeof(struct inotify_watch),
@@ -991,9 +1065,7 @@ static int __init inotify_init(void)
 					 sizeof(struct inotify_kernel_event),
 					 0, SLAB_PANIC, NULL, NULL);
 
-	printk(KERN_INFO "inotify syscall\n");
-
 	return 0;
 }
 
-module_init(inotify_init);
+module_init(inotify_setup);
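
For reference, below is a minimal user-space sketch (not part of the patch above) of the behaviour this update introduces: an inotify instance is counted against /proc/sys/fs/inotify/max_user_instances, each watch against max_user_watches, and the new IN_MASK_ADD flag ORs additional bits into an existing watch's mask instead of replacing it, returning the same watch descriptor. The sketch assumes a libc that exposes the inotify calls through <sys/inotify.h>; at the time of this patch they had to be issued as raw syscalls instead, and the watched path /tmp is only an example.

/* Illustrative sketch only -- not part of fs/inotify.c or this patch. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/inotify.h>	/* assumes a libc with inotify wrappers */

int main(void)
{
	int fd, wd, wd2;

	/* one inotify instance, counted against
	 * /proc/sys/fs/inotify/max_user_instances */
	fd = inotify_init();
	if (fd < 0) {
		perror("inotify_init");
		return EXIT_FAILURE;
	}

	/* each watch counts against /proc/sys/fs/inotify/max_user_watches */
	wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
	if (wd < 0) {
		perror("inotify_add_watch");
		return EXIT_FAILURE;
	}

	/* watching the same inode again normally replaces the mask; with
	 * IN_MASK_ADD the new bits are OR'd into the existing mask and the
	 * same watch descriptor is returned */
	wd2 = inotify_add_watch(fd, "/tmp", IN_MODIFY | IN_MASK_ADD);
	if (wd2 < 0) {
		perror("inotify_add_watch");
		return EXIT_FAILURE;
	}
	printf("wd=%d wd2=%d (same descriptor, merged mask)\n", wd, wd2);

	inotify_rm_watch(fd, wd);
	close(fd);
	return EXIT_SUCCESS;
}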