X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=kernel%2Fcgroup.c;h=1a3c23936d43d99ec3d429123c95182a94de0c02;hb=856848737bd944c1db3ce0a66bbf67e56bd6f77d;hp=db245f19eb8a761d727da30fe1790f2a4c050a57;hpb=a424316ca154317367c7ddf89997d1c80e4a8051;p=linux-2.6 diff --git a/kernel/cgroup.c b/kernel/cgroup.c index db245f19eb..1a3c23936d 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1,6 +1,4 @@ /* - * kernel/cgroup.c - * * Generic process-grouping system. * * Based originally on the cpuset system, extracted by Paul Menage @@ -36,14 +34,21 @@ #include #include #include +#include #include #include #include #include #include #include +#include +#include +#include + #include +static DEFINE_MUTEX(cgroup_mutex); + /* Generate an array of cgroup subsystem pointers */ #define SUBSYS(_x) &_x ## _subsys, @@ -82,6 +87,13 @@ struct cgroupfs_root { /* Hierarchy-specific flags */ unsigned long flags; + + /* The path to use for release notifications. No locking + * between setting and use - so if userspace updates this + * while child cgroups exist, you could miss a + * notification. We ensure that it's always a valid + * NUL-terminated string */ + char release_agent_path[PATH_MAX]; }; @@ -95,6 +107,7 @@ static struct cgroupfs_root rootnode; /* The list of hierarchy roots */ static LIST_HEAD(roots); +static int root_count; /* dummytop is a shorthand for the dummy hierarchy's top cgroup */ #define dummytop (&rootnode.top_cgroup) @@ -108,13 +121,19 @@ static int need_forkexit_callback; /* bits in struct cgroup flags field */ enum { - CONT_REMOVED, + /* Control Group is dead */ + CGRP_REMOVED, + /* Control Group has previously had a child cgroup or a task, + * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */ + CGRP_RELEASABLE, + /* Control Group requires release notifications to userspace */ + CGRP_NOTIFY_ON_RELEASE, }; /* convenient tests for these bits */ -inline int cgroup_is_removed(const struct cgroup *cont) +inline int cgroup_is_removed(const struct cgroup *cgrp) { - return test_bit(CONT_REMOVED, &cont->flags); + return test_bit(CGRP_REMOVED, &cgrp->flags); } /* bits in struct cgroupfs_root flags field */ @@ -122,6 +141,19 @@ enum { ROOT_NOPREFIX, /* mounted subsystems have no named prefix */ }; +inline int cgroup_is_releasable(const struct cgroup *cgrp) +{ + const int bits = + (1 << CGRP_RELEASABLE) | + (1 << CGRP_NOTIFY_ON_RELEASE); + return (cgrp->flags & bits) == bits; +} + +inline int notify_on_release(const struct cgroup *cgrp) +{ + return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); +} + /* * for_each_subsys() allows you to iterate on each subsystem attached to * an active hierarchy @@ -133,12 +165,57 @@ list_for_each_entry(_ss, &_root->subsys_list, sibling) #define for_each_root(_root) \ list_for_each_entry(_root, &roots, root_list) -/* Each task_struct has an embedded css_set, so the get/put - * operation simply takes a reference count on all the cgroups - * referenced by subsystems in this css_set. This can end up - * multiple-counting some cgroups, but that's OK - the ref-count is - * just a busy/not-busy indicator; ensuring that we only count each - * cgroup once would require taking a global lock to ensure that no +/* the list of cgroups eligible for automatic release. 
Protected by + * release_list_lock */ +static LIST_HEAD(release_list); +static DEFINE_SPINLOCK(release_list_lock); +static void cgroup_release_agent(struct work_struct *work); +static DECLARE_WORK(release_agent_work, cgroup_release_agent); +static void check_for_release(struct cgroup *cgrp); + +/* Link structure for associating css_set objects with cgroups */ +struct cg_cgroup_link { + /* + * List running through cg_cgroup_links associated with a + * cgroup, anchored on cgroup->css_sets + */ + struct list_head cgrp_link_list; + /* + * List running through cg_cgroup_links pointing at a + * single css_set object, anchored on css_set->cg_links + */ + struct list_head cg_link_list; + struct css_set *cg; +}; + +/* The default css_set - used by init and its children prior to any + * hierarchies being mounted. It contains a pointer to the root state + * for each subsystem. Also used to anchor the list of css_sets. Not + * reference-counted, to improve performance when child cgroups + * haven't been created. + */ + +static struct css_set init_css_set; +static struct cg_cgroup_link init_css_set_link; + +/* css_set_lock protects the list of css_set objects, and the + * chain of tasks off each css_set. Nests outside task->alloc_lock + * due to cgroup_iter_start() */ +static DEFINE_RWLOCK(css_set_lock); +static int css_set_count; + +/* We don't maintain the lists running through each css_set to its + * task until after the first call to cgroup_iter_start(). This + * reduces the fork()/exit() overhead for people who have cgroups + * compiled into their kernel but not actually in use */ +static int use_task_css_set_links; + +/* When we create or destroy a css_set, the operation simply + * takes/releases a reference count on all the cgroups referenced + * by subsystems in this css_set. This can end up multiple-counting + * some cgroups, but that's OK - the ref-count is just a + * busy/not-busy indicator; ensuring that we only count each cgroup + * once would require taking a global lock to ensure that no * subsystems moved between hierarchies while we were doing so. * * Possible TODO: decide at boot time based on the number of @@ -146,18 +223,260 @@ list_for_each_entry(_root, &roots, root_list) * it's better for performance to ref-count every subsystem, or to * take a global lock and only add one ref count to each hierarchy. 
*/ -static void get_css_set(struct css_set *cg) + +/* + * unlink a css_set from the list and free it + */ +static void unlink_css_set(struct css_set *cg) +{ + write_lock(&css_set_lock); + list_del(&cg->list); + css_set_count--; + while (!list_empty(&cg->cg_links)) { + struct cg_cgroup_link *link; + link = list_entry(cg->cg_links.next, + struct cg_cgroup_link, cg_link_list); + list_del(&link->cg_link_list); + list_del(&link->cgrp_link_list); + kfree(link); + } + write_unlock(&css_set_lock); +} + +static void __release_css_set(struct kref *k, int taskexit) { int i; - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) - atomic_inc(&cg->subsys[i]->cgroup->count); + struct css_set *cg = container_of(k, struct css_set, ref); + + unlink_css_set(cg); + + rcu_read_lock(); + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + struct cgroup *cgrp = cg->subsys[i]->cgroup; + if (atomic_dec_and_test(&cgrp->count) && + notify_on_release(cgrp)) { + if (taskexit) + set_bit(CGRP_RELEASABLE, &cgrp->flags); + check_for_release(cgrp); + } + } + rcu_read_unlock(); + kfree(cg); +} + +static void release_css_set(struct kref *k) +{ + __release_css_set(k, 0); +} + +static void release_css_set_taskexit(struct kref *k) +{ + __release_css_set(k, 1); } -static void put_css_set(struct css_set *cg) +/* + * refcounted get/put for css_set objects + */ +static inline void get_css_set(struct css_set *cg) +{ + kref_get(&cg->ref); +} + +static inline void put_css_set(struct css_set *cg) +{ + kref_put(&cg->ref, release_css_set); +} + +static inline void put_css_set_taskexit(struct css_set *cg) +{ + kref_put(&cg->ref, release_css_set_taskexit); +} + +/* + * find_existing_css_set() is a helper for + * find_css_set(), and checks to see whether an existing + * css_set is suitable. This currently walks a linked-list for + * simplicity; a later patch will use a hash table for better + * performance + * + * oldcg: the cgroup group that we're using before the cgroup + * transition + * + * cgrp: the cgroup that we're moving into + * + * template: location in which to build the desired set of subsystem + * state objects for the new cgroup group + */ + +static struct css_set *find_existing_css_set( + struct css_set *oldcg, + struct cgroup *cgrp, + struct cgroup_subsys_state *template[]) { int i; - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) - atomic_dec(&cg->subsys[i]->cgroup->count); + struct cgroupfs_root *root = cgrp->root; + struct list_head *l = &init_css_set.list; + + /* Built the set of subsystem state objects that we want to + * see in the new css_set */ + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + if (root->subsys_bits & (1ull << i)) { + /* Subsystem is in this hierarchy. So we want + * the subsystem state from the new + * cgroup */ + template[i] = cgrp->subsys[i]; + } else { + /* Subsystem is not in this hierarchy, so we + * don't want to change the subsystem state */ + template[i] = oldcg->subsys[i]; + } + } + + /* Look through existing cgroup groups to find one to reuse */ + do { + struct css_set *cg = + list_entry(l, struct css_set, list); + + if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) { + /* All subsystems matched */ + return cg; + } + /* Try the next cgroup group */ + l = l->next; + } while (l != &init_css_set.list); + + /* No existing cgroup group matched */ + return NULL; +} + +/* + * allocate_cg_links() allocates "count" cg_cgroup_link structures + * and chains them on tmp through their cgrp_link_list fields. 
Returns 0 on + * success or a negative error + */ + +static int allocate_cg_links(int count, struct list_head *tmp) +{ + struct cg_cgroup_link *link; + int i; + INIT_LIST_HEAD(tmp); + for (i = 0; i < count; i++) { + link = kmalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + while (!list_empty(tmp)) { + link = list_entry(tmp->next, + struct cg_cgroup_link, + cgrp_link_list); + list_del(&link->cgrp_link_list); + kfree(link); + } + return -ENOMEM; + } + list_add(&link->cgrp_link_list, tmp); + } + return 0; +} + +static void free_cg_links(struct list_head *tmp) +{ + while (!list_empty(tmp)) { + struct cg_cgroup_link *link; + link = list_entry(tmp->next, + struct cg_cgroup_link, + cgrp_link_list); + list_del(&link->cgrp_link_list); + kfree(link); + } +} + +/* + * find_css_set() takes an existing cgroup group and a + * cgroup object, and returns a css_set object that's + * equivalent to the old group, but with the given cgroup + * substituted into the appropriate hierarchy. Must be called with + * cgroup_mutex held + */ + +static struct css_set *find_css_set( + struct css_set *oldcg, struct cgroup *cgrp) +{ + struct css_set *res; + struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT]; + int i; + + struct list_head tmp_cg_links; + struct cg_cgroup_link *link; + + /* First see if we already have a cgroup group that matches + * the desired set */ + write_lock(&css_set_lock); + res = find_existing_css_set(oldcg, cgrp, template); + if (res) + get_css_set(res); + write_unlock(&css_set_lock); + + if (res) + return res; + + res = kmalloc(sizeof(*res), GFP_KERNEL); + if (!res) + return NULL; + + /* Allocate all the cg_cgroup_link objects that we'll need */ + if (allocate_cg_links(root_count, &tmp_cg_links) < 0) { + kfree(res); + return NULL; + } + + kref_init(&res->ref); + INIT_LIST_HEAD(&res->cg_links); + INIT_LIST_HEAD(&res->tasks); + + /* Copy the set of subsystem state objects generated in + * find_existing_css_set() */ + memcpy(res->subsys, template, sizeof(res->subsys)); + + write_lock(&css_set_lock); + /* Add reference counts and links from the new css_set. 
*/ + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + struct cgroup *cgrp = res->subsys[i]->cgroup; + struct cgroup_subsys *ss = subsys[i]; + atomic_inc(&cgrp->count); + /* + * We want to add a link once per cgroup, so we + * only do it for the first subsystem in each + * hierarchy + */ + if (ss->root->subsys_list.next == &ss->sibling) { + BUG_ON(list_empty(&tmp_cg_links)); + link = list_entry(tmp_cg_links.next, + struct cg_cgroup_link, + cgrp_link_list); + list_del(&link->cgrp_link_list); + list_add(&link->cgrp_link_list, &cgrp->css_sets); + link->cg = res; + list_add(&link->cg_link_list, &res->cg_links); + } + } + if (list_empty(&rootnode.subsys_list)) { + link = list_entry(tmp_cg_links.next, + struct cg_cgroup_link, + cgrp_link_list); + list_del(&link->cgrp_link_list); + list_add(&link->cgrp_link_list, &dummytop->css_sets); + link->cg = res; + list_add(&link->cg_link_list, &res->cg_links); + } + + BUG_ON(!list_empty(&tmp_cg_links)); + + /* Link this cgroup group into the list */ + list_add(&res->list, &init_css_set.list); + css_set_count++; + INIT_LIST_HEAD(&res->tasks); + write_unlock(&css_set_lock); + + return res; } /* @@ -214,8 +533,6 @@ static void put_css_set(struct css_set *cg) * update of a tasks cgroup pointer by attach_task() */ -static DEFINE_MUTEX(cgroup_mutex); - /** * cgroup_lock - lock out any changes to cgroup structures * @@ -246,7 +563,7 @@ void cgroup_unlock(void) static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode); static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry); -static int cgroup_populate_dir(struct cgroup *cont); +static int cgroup_populate_dir(struct cgroup *cgrp); static struct inode_operations cgroup_dir_inode_operations; static struct file_operations proc_cgroupstats_operations; @@ -273,9 +590,16 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) { /* is dentry a directory ? if so, kfree() associated cgroup */ if (S_ISDIR(inode->i_mode)) { - struct cgroup *cont = dentry->d_fsdata; - BUG_ON(!(cgroup_is_removed(cont))); - kfree(cont); + struct cgroup *cgrp = dentry->d_fsdata; + BUG_ON(!(cgroup_is_removed(cgrp))); + /* It's possible for external users to be holding css + * reference counts on a cgroup; css_put() needs to + * be able to access the cgroup after decrementing + * the reference count in order to know if it needs to + * queue the cgroup to be handled by the release + * agent */ + synchronize_rcu(); + kfree(cgrp); } iput(inode); } @@ -332,7 +656,7 @@ static int rebind_subsystems(struct cgroupfs_root *root, unsigned long final_bits) { unsigned long added_bits, removed_bits; - struct cgroup *cont = &root->top_cgroup; + struct cgroup *cgrp = &root->top_cgroup; int i; removed_bits = root->actual_subsys_bits & ~final_bits; @@ -353,7 +677,7 @@ static int rebind_subsystems(struct cgroupfs_root *root, * any child cgroups exist. 
This is theoretically supportable * but involves complex error handling, so it's being left until * later */ - if (!list_empty(&cont->children)) + if (!list_empty(&cgrp->children)) return -EBUSY; /* Process each subsystem */ @@ -362,32 +686,32 @@ static int rebind_subsystems(struct cgroupfs_root *root, unsigned long bit = 1UL << i; if (bit & added_bits) { /* We're binding this subsystem to this hierarchy */ - BUG_ON(cont->subsys[i]); + BUG_ON(cgrp->subsys[i]); BUG_ON(!dummytop->subsys[i]); BUG_ON(dummytop->subsys[i]->cgroup != dummytop); - cont->subsys[i] = dummytop->subsys[i]; - cont->subsys[i]->cgroup = cont; + cgrp->subsys[i] = dummytop->subsys[i]; + cgrp->subsys[i]->cgroup = cgrp; list_add(&ss->sibling, &root->subsys_list); rcu_assign_pointer(ss->root, root); if (ss->bind) - ss->bind(ss, cont); + ss->bind(ss, cgrp); } else if (bit & removed_bits) { /* We're removing this subsystem */ - BUG_ON(cont->subsys[i] != dummytop->subsys[i]); - BUG_ON(cont->subsys[i]->cgroup != cont); + BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]); + BUG_ON(cgrp->subsys[i]->cgroup != cgrp); if (ss->bind) ss->bind(ss, dummytop); dummytop->subsys[i]->cgroup = dummytop; - cont->subsys[i] = NULL; + cgrp->subsys[i] = NULL; rcu_assign_pointer(subsys[i]->root, &rootnode); list_del(&ss->sibling); } else if (bit & final_bits) { /* Subsystem state should already exist */ - BUG_ON(!cont->subsys[i]); + BUG_ON(!cgrp->subsys[i]); } else { /* Subsystem state shouldn't exist */ - BUG_ON(cont->subsys[i]); + BUG_ON(cgrp->subsys[i]); } } root->subsys_bits = root->actual_subsys_bits = final_bits; @@ -406,6 +730,8 @@ static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs) seq_printf(seq, ",%s", ss->name); if (test_bit(ROOT_NOPREFIX, &root->flags)) seq_puts(seq, ",noprefix"); + if (strlen(root->release_agent_path)) + seq_printf(seq, ",release_agent=%s", root->release_agent_path); mutex_unlock(&cgroup_mutex); return 0; } @@ -413,6 +739,7 @@ static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs) struct cgroup_sb_opts { unsigned long subsys_bits; unsigned long flags; + char *release_agent; }; /* Convert a hierarchy specifier into a bitmask of subsystems and @@ -424,6 +751,7 @@ static int parse_cgroupfs_options(char *data, opts->subsys_bits = 0; opts->flags = 0; + opts->release_agent = NULL; while ((token = strsep(&o, ",")) != NULL) { if (!*token) @@ -432,6 +760,15 @@ static int parse_cgroupfs_options(char *data, opts->subsys_bits = (1 << CGROUP_SUBSYS_COUNT) - 1; } else if (!strcmp(token, "noprefix")) { set_bit(ROOT_NOPREFIX, &opts->flags); + } else if (!strncmp(token, "release_agent=", 14)) { + /* Specifying two release agents is forbidden */ + if (opts->release_agent) + return -EINVAL; + opts->release_agent = kzalloc(PATH_MAX, GFP_KERNEL); + if (!opts->release_agent) + return -ENOMEM; + strncpy(opts->release_agent, token + 14, PATH_MAX - 1); + opts->release_agent[PATH_MAX - 1] = 0; } else { struct cgroup_subsys *ss; int i; @@ -458,10 +795,10 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) { int ret = 0; struct cgroupfs_root *root = sb->s_fs_info; - struct cgroup *cont = &root->top_cgroup; + struct cgroup *cgrp = &root->top_cgroup; struct cgroup_sb_opts opts; - mutex_lock(&cont->dentry->d_inode->i_mutex); + mutex_lock(&cgrp->dentry->d_inode->i_mutex); mutex_lock(&cgroup_mutex); /* See what subsystems are wanted */ @@ -479,11 +816,15 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) /* (re)populate subsystem files */ if (!ret) - 
cgroup_populate_dir(cont); + cgroup_populate_dir(cgrp); + if (opts.release_agent) + strcpy(root->release_agent_path, opts.release_agent); out_unlock: + if (opts.release_agent) + kfree(opts.release_agent); mutex_unlock(&cgroup_mutex); - mutex_unlock(&cont->dentry->d_inode->i_mutex); + mutex_unlock(&cgrp->dentry->d_inode->i_mutex); return ret; } @@ -496,14 +837,16 @@ static struct super_operations cgroup_ops = { static void init_cgroup_root(struct cgroupfs_root *root) { - struct cgroup *cont = &root->top_cgroup; + struct cgroup *cgrp = &root->top_cgroup; INIT_LIST_HEAD(&root->subsys_list); INIT_LIST_HEAD(&root->root_list); root->number_of_cgroups = 1; - cont->root = root; - cont->top_cgroup = cont; - INIT_LIST_HEAD(&cont->sibling); - INIT_LIST_HEAD(&cont->children); + cgrp->root = root; + cgrp->top_cgroup = cgrp; + INIT_LIST_HEAD(&cgrp->sibling); + INIT_LIST_HEAD(&cgrp->children); + INIT_LIST_HEAD(&cgrp->css_sets); + INIT_LIST_HEAD(&cgrp->release_list); } static int cgroup_test_super(struct super_block *sb, void *data) @@ -573,11 +916,16 @@ static int cgroup_get_sb(struct file_system_type *fs_type, int ret = 0; struct super_block *sb; struct cgroupfs_root *root; + struct list_head tmp_cg_links, *l; + INIT_LIST_HEAD(&tmp_cg_links); /* First find the desired set of subsystems */ ret = parse_cgroupfs_options(data, &opts); - if (ret) + if (ret) { + if (opts.release_agent) + kfree(opts.release_agent); return ret; + } root = kzalloc(sizeof(*root), GFP_KERNEL); if (!root) @@ -586,6 +934,10 @@ static int cgroup_get_sb(struct file_system_type *fs_type, init_cgroup_root(root); root->subsys_bits = opts.subsys_bits; root->flags = opts.flags; + if (opts.release_agent) { + strcpy(root->release_agent_path, opts.release_agent); + kfree(opts.release_agent); + } sb = sget(fs_type, cgroup_test_super, cgroup_set_super, root); @@ -601,19 +953,37 @@ static int cgroup_get_sb(struct file_system_type *fs_type, root = NULL; } else { /* New superblock */ - struct cgroup *cont = &root->top_cgroup; + struct cgroup *cgrp = &root->top_cgroup; + struct inode *inode; BUG_ON(sb->s_root != NULL); ret = cgroup_get_rootdir(sb); if (ret) goto drop_new_super; + inode = sb->s_root->d_inode; + mutex_lock(&inode->i_mutex); mutex_lock(&cgroup_mutex); + /* + * We're accessing css_set_count without locking + * css_set_lock here, but that's OK - it can only be + * increased by someone holding cgroup_lock, and + * that's us. 
The worst that can happen is that we + * have some link structures left over + */ + ret = allocate_cg_links(css_set_count, &tmp_cg_links); + if (ret) { + mutex_unlock(&cgroup_mutex); + mutex_unlock(&inode->i_mutex); + goto drop_new_super; + } + ret = rebind_subsystems(root, root->subsys_bits); if (ret == -EBUSY) { mutex_unlock(&cgroup_mutex); + mutex_unlock(&inode->i_mutex); goto drop_new_super; } @@ -621,24 +991,40 @@ static int cgroup_get_sb(struct file_system_type *fs_type, BUG_ON(ret); list_add(&root->root_list, &roots); + root_count++; sb->s_root->d_fsdata = &root->top_cgroup; root->top_cgroup.dentry = sb->s_root; - BUG_ON(!list_empty(&cont->sibling)); - BUG_ON(!list_empty(&cont->children)); + /* Link the top cgroup in this hierarchy into all + * the css_set objects */ + write_lock(&css_set_lock); + l = &init_css_set.list; + do { + struct css_set *cg; + struct cg_cgroup_link *link; + cg = list_entry(l, struct css_set, list); + BUG_ON(list_empty(&tmp_cg_links)); + link = list_entry(tmp_cg_links.next, + struct cg_cgroup_link, + cgrp_link_list); + list_del(&link->cgrp_link_list); + link->cg = cg; + list_add(&link->cgrp_link_list, + &root->top_cgroup.css_sets); + list_add(&link->cg_link_list, &cg->cg_links); + l = l->next; + } while (l != &init_css_set.list); + write_unlock(&css_set_lock); + + free_cg_links(&tmp_cg_links); + + BUG_ON(!list_empty(&cgrp->sibling)); + BUG_ON(!list_empty(&cgrp->children)); BUG_ON(root->number_of_cgroups != 1); - /* - * I believe that it's safe to nest i_mutex inside - * cgroup_mutex in this case, since no-one else can - * be accessing this directory yet. But we still need - * to teach lockdep that this is the case - currently - * a cgroupfs remount triggers a lockdep warning - */ - mutex_lock(&cont->dentry->d_inode->i_mutex); - cgroup_populate_dir(cont); - mutex_unlock(&cont->dentry->d_inode->i_mutex); + cgroup_populate_dir(cgrp); + mutex_unlock(&inode->i_mutex); mutex_unlock(&cgroup_mutex); } @@ -647,19 +1033,20 @@ static int cgroup_get_sb(struct file_system_type *fs_type, drop_new_super: up_write(&sb->s_umount); deactivate_super(sb); + free_cg_links(&tmp_cg_links); return ret; } static void cgroup_kill_sb(struct super_block *sb) { struct cgroupfs_root *root = sb->s_fs_info; - struct cgroup *cont = &root->top_cgroup; + struct cgroup *cgrp = &root->top_cgroup; int ret; BUG_ON(!root); BUG_ON(root->number_of_cgroups != 1); - BUG_ON(!list_empty(&cont->children)); - BUG_ON(!list_empty(&cont->sibling)); + BUG_ON(!list_empty(&cgrp->children)); + BUG_ON(!list_empty(&cgrp->sibling)); mutex_lock(&cgroup_mutex); @@ -668,8 +1055,25 @@ static void cgroup_kill_sb(struct super_block *sb) { /* Shouldn't be able to fail ... 
*/ BUG_ON(ret); - if (!list_empty(&root->root_list)) + /* + * Release all the links from css_sets to this hierarchy's + * root cgroup + */ + write_lock(&css_set_lock); + while (!list_empty(&cgrp->css_sets)) { + struct cg_cgroup_link *link; + link = list_entry(cgrp->css_sets.next, + struct cg_cgroup_link, cgrp_link_list); + list_del(&link->cg_link_list); + list_del(&link->cgrp_link_list); + kfree(link); + } + write_unlock(&css_set_lock); + + if (!list_empty(&root->root_list)) { list_del(&root->root_list); + root_count--; + } mutex_unlock(&cgroup_mutex); kfree(root); @@ -682,7 +1086,7 @@ static struct file_system_type cgroup_fs_type = { .kill_sb = cgroup_kill_sb, }; -static inline struct cgroup *__d_cont(struct dentry *dentry) +static inline struct cgroup *__d_cgrp(struct dentry *dentry) { return dentry->d_fsdata; } @@ -696,11 +1100,11 @@ static inline struct cftype *__d_cft(struct dentry *dentry) * Called with cgroup_mutex held. Writes path of cgroup into buf. * Returns 0 on success, -errno on error. */ -int cgroup_path(const struct cgroup *cont, char *buf, int buflen) +int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen) { char *start; - if (cont == dummytop) { + if (cgrp == dummytop) { /* * Inactive subsystems have no dentry for their root * cgroup @@ -713,14 +1117,14 @@ int cgroup_path(const struct cgroup *cont, char *buf, int buflen) *--start = '\0'; for (;;) { - int len = cont->dentry->d_name.len; + int len = cgrp->dentry->d_name.len; if ((start -= len) < buf) return -ENAMETOOLONG; - memcpy(start, cont->dentry->d_name.name, len); - cont = cont->parent; - if (!cont) + memcpy(start, cgrp->dentry->d_name.name, len); + cgrp = cgrp->parent; + if (!cgrp) break; - if (!cont->parent) + if (!cgrp->parent) continue; if (--start < buf) return -ENAMETOOLONG; @@ -735,16 +1139,16 @@ int cgroup_path(const struct cgroup *cont, char *buf, int buflen) * its subsystem id. */ -static void get_first_subsys(const struct cgroup *cont, +static void get_first_subsys(const struct cgroup *cgrp, struct cgroup_subsys_state **css, int *subsys_id) { - const struct cgroupfs_root *root = cont->root; + const struct cgroupfs_root *root = cgrp->root; const struct cgroup_subsys *test_ss; BUG_ON(list_empty(&root->subsys_list)); test_ss = list_entry(root->subsys_list.next, struct cgroup_subsys, sibling); if (css) { - *css = cont->subsys[test_ss->subsys_id]; + *css = cgrp->subsys[test_ss->subsys_id]; BUG_ON(!*css); } if (subsys_id) @@ -752,72 +1156,79 @@ static void get_first_subsys(const struct cgroup *cont, } /* - * Attach task 'tsk' to cgroup 'cont' + * Attach task 'tsk' to cgroup 'cgrp' * * Call holding cgroup_mutex. May take task_lock of * the task 'pid' during call. 
*/ -static int attach_task(struct cgroup *cont, struct task_struct *tsk) +static int attach_task(struct cgroup *cgrp, struct task_struct *tsk) { int retval = 0; struct cgroup_subsys *ss; - struct cgroup *oldcont; - struct css_set *cg = &tsk->cgroups; - struct cgroupfs_root *root = cont->root; - int i; + struct cgroup *oldcgrp; + struct css_set *cg = tsk->cgroups; + struct css_set *newcg; + struct cgroupfs_root *root = cgrp->root; int subsys_id; - get_first_subsys(cont, NULL, &subsys_id); + get_first_subsys(cgrp, NULL, &subsys_id); /* Nothing to do if the task is already in that cgroup */ - oldcont = task_cgroup(tsk, subsys_id); - if (cont == oldcont) + oldcgrp = task_cgroup(tsk, subsys_id); + if (cgrp == oldcgrp) return 0; for_each_subsys(root, ss) { if (ss->can_attach) { - retval = ss->can_attach(ss, cont, tsk); + retval = ss->can_attach(ss, cgrp, tsk); if (retval) { return retval; } } } + /* + * Locate or allocate a new css_set for this task, + * based on its final set of cgroups + */ + newcg = find_css_set(cg, cgrp); + if (!newcg) { + return -ENOMEM; + } + task_lock(tsk); if (tsk->flags & PF_EXITING) { task_unlock(tsk); + put_css_set(newcg); return -ESRCH; } - /* Update the css_set pointers for the subsystems in this - * hierarchy */ - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - if (root->subsys_bits & (1ull << i)) { - /* Subsystem is in this hierarchy. So we want - * the subsystem state from the new - * cgroup. Transfer the refcount from the - * old to the new */ - atomic_inc(&cont->count); - atomic_dec(&cg->subsys[i]->cgroup->count); - rcu_assign_pointer(cg->subsys[i], cont->subsys[i]); - } - } + rcu_assign_pointer(tsk->cgroups, newcg); task_unlock(tsk); + /* Update the css_set linked lists if we're using them */ + write_lock(&css_set_lock); + if (!list_empty(&tsk->cg_list)) { + list_del(&tsk->cg_list); + list_add(&tsk->cg_list, &newcg->tasks); + } + write_unlock(&css_set_lock); + for_each_subsys(root, ss) { if (ss->attach) { - ss->attach(ss, cont, oldcont, tsk); + ss->attach(ss, cgrp, oldcgrp, tsk); } } - + set_bit(CGRP_RELEASABLE, &oldcgrp->flags); synchronize_rcu(); + put_css_set(cg); return 0; } /* - * Attach task with pid 'pid' to cgroup 'cont'. Call with + * Attach task with pid 'pid' to cgroup 'cgrp'. 
Call with * cgroup_mutex, may take task_lock of task */ -static int attach_task_by_pid(struct cgroup *cont, char *pidbuf) +static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf) { pid_t pid; struct task_struct *tsk; @@ -846,7 +1257,7 @@ static int attach_task_by_pid(struct cgroup *cont, char *pidbuf) get_task_struct(tsk); } - ret = attach_task(cont, tsk); + ret = attach_task(cgrp, tsk); put_task_struct(tsk); return ret; } @@ -857,9 +1268,12 @@ enum cgroup_filetype { FILE_ROOT, FILE_DIR, FILE_TASKLIST, + FILE_NOTIFY_ON_RELEASE, + FILE_RELEASABLE, + FILE_RELEASE_AGENT, }; -static ssize_t cgroup_write_uint(struct cgroup *cont, struct cftype *cft, +static ssize_t cgroup_write_uint(struct cgroup *cgrp, struct cftype *cft, struct file *file, const char __user *userbuf, size_t nbytes, loff_t *unused_ppos) @@ -886,13 +1300,13 @@ static ssize_t cgroup_write_uint(struct cgroup *cont, struct cftype *cft, return -EINVAL; /* Pass to subsystem */ - retval = cft->write_uint(cont, cft, val); + retval = cft->write_uint(cgrp, cft, val); if (!retval) retval = nbytes; return retval; } -static ssize_t cgroup_common_file_write(struct cgroup *cont, +static ssize_t cgroup_common_file_write(struct cgroup *cgrp, struct cftype *cft, struct file *file, const char __user *userbuf, @@ -918,15 +1332,41 @@ static ssize_t cgroup_common_file_write(struct cgroup *cont, mutex_lock(&cgroup_mutex); - if (cgroup_is_removed(cont)) { + if (cgroup_is_removed(cgrp)) { retval = -ENODEV; goto out2; } switch (type) { case FILE_TASKLIST: - retval = attach_task_by_pid(cont, buffer); + retval = attach_task_by_pid(cgrp, buffer); + break; + case FILE_NOTIFY_ON_RELEASE: + clear_bit(CGRP_RELEASABLE, &cgrp->flags); + if (simple_strtoul(buffer, NULL, 10) != 0) + set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); + else + clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); + break; + case FILE_RELEASE_AGENT: + { + struct cgroupfs_root *root = cgrp->root; + /* Strip trailing newline */ + if (nbytes && (buffer[nbytes-1] == '\n')) { + buffer[nbytes-1] = 0; + } + if (nbytes < sizeof(root->release_agent_path)) { + /* We never write anything other than '\0' + * into the last char of release_agent_path, + * so it always remains a NUL-terminated + * string */ + strncpy(root->release_agent_path, buffer, nbytes); + root->release_agent_path[nbytes] = 0; + } else { + retval = -ENOSPC; + } break; + } default: retval = -EINVAL; goto out2; @@ -945,42 +1385,85 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct cftype *cft = __d_cft(file->f_dentry); - struct cgroup *cont = __d_cont(file->f_dentry->d_parent); + struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); if (!cft) return -ENODEV; if (cft->write) - return cft->write(cont, cft, file, buf, nbytes, ppos); + return cft->write(cgrp, cft, file, buf, nbytes, ppos); if (cft->write_uint) - return cgroup_write_uint(cont, cft, file, buf, nbytes, ppos); + return cgroup_write_uint(cgrp, cft, file, buf, nbytes, ppos); return -EINVAL; } -static ssize_t cgroup_read_uint(struct cgroup *cont, struct cftype *cft, +static ssize_t cgroup_read_uint(struct cgroup *cgrp, struct cftype *cft, struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { char tmp[64]; - u64 val = cft->read_uint(cont, cft); + u64 val = cft->read_uint(cgrp, cft); int len = sprintf(tmp, "%llu\n", (unsigned long long) val); return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); } +static ssize_t cgroup_common_file_read(struct cgroup *cgrp, + struct cftype *cft, + 
struct file *file, + char __user *buf, + size_t nbytes, loff_t *ppos) +{ + enum cgroup_filetype type = cft->private; + char *page; + ssize_t retval = 0; + char *s; + + if (!(page = (char *)__get_free_page(GFP_KERNEL))) + return -ENOMEM; + + s = page; + + switch (type) { + case FILE_RELEASE_AGENT: + { + struct cgroupfs_root *root; + size_t n; + mutex_lock(&cgroup_mutex); + root = cgrp->root; + n = strnlen(root->release_agent_path, + sizeof(root->release_agent_path)); + n = min(n, (size_t) PAGE_SIZE); + strncpy(s, root->release_agent_path, n); + mutex_unlock(&cgroup_mutex); + s += n; + break; + } + default: + retval = -EINVAL; + goto out; + } + *s++ = '\n'; + + retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page); +out: + free_page((unsigned long)page); + return retval; +} + static ssize_t cgroup_file_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct cftype *cft = __d_cft(file->f_dentry); - struct cgroup *cont = __d_cont(file->f_dentry->d_parent); + struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); if (!cft) return -ENODEV; if (cft->read) - return cft->read(cont, cft, file, buf, nbytes, ppos); + return cft->read(cgrp, cft, file, buf, nbytes, ppos); if (cft->read_uint) - return cgroup_read_uint(cont, cft, file, buf, nbytes, ppos); + return cgroup_read_uint(cgrp, cft, file, buf, nbytes, ppos); return -EINVAL; } @@ -1069,7 +1552,7 @@ static int cgroup_create_file(struct dentry *dentry, int mode, /* start with the directory inode held, so that we can * populate it without racing with another mkdir */ - mutex_lock(&inode->i_mutex); + mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD); } else if (S_ISREG(mode)) { inode->i_size = 0; inode->i_fop = &cgroup_file_operations; @@ -1082,24 +1565,24 @@ static int cgroup_create_file(struct dentry *dentry, int mode, /* * cgroup_create_dir - create a directory for an object. - * cont: the cgroup we create the directory for. + * cgrp: the cgroup we create the directory for. * It must have a valid ->parent field * And we are going to fill its ->dentry field. - * dentry: dentry of the new container + * dentry: dentry of the new cgroup * mode: mode to set on new directory. 
*/ -static int cgroup_create_dir(struct cgroup *cont, struct dentry *dentry, +static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry, int mode) { struct dentry *parent; int error = 0; - parent = cont->parent->dentry; - error = cgroup_create_file(dentry, S_IFDIR | mode, cont->root->sb); + parent = cgrp->parent->dentry; + error = cgroup_create_file(dentry, S_IFDIR | mode, cgrp->root->sb); if (!error) { - dentry->d_fsdata = cont; + dentry->d_fsdata = cgrp; inc_nlink(parent->d_inode); - cont->dentry = dentry; + cgrp->dentry = dentry; dget(dentry); } dput(dentry); @@ -1107,16 +1590,16 @@ static int cgroup_create_dir(struct cgroup *cont, struct dentry *dentry, return error; } -int cgroup_add_file(struct cgroup *cont, +int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys, const struct cftype *cft) { - struct dentry *dir = cont->dentry; + struct dentry *dir = cgrp->dentry; struct dentry *dentry; int error; char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 }; - if (subsys && !test_bit(ROOT_NOPREFIX, &cont->root->flags)) { + if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) { strcpy(name, subsys->name); strcat(name, "."); } @@ -1125,7 +1608,7 @@ int cgroup_add_file(struct cgroup *cont, dentry = lookup_one_len(name, dir, strlen(name)); if (!IS_ERR(dentry)) { error = cgroup_create_file(dentry, 0644 | S_IFREG, - cont->root->sb); + cgrp->root->sb); if (!error) dentry->d_fsdata = (void *)cft; dput(dentry); @@ -1134,41 +1617,115 @@ int cgroup_add_file(struct cgroup *cont, return error; } -int cgroup_add_files(struct cgroup *cont, +int cgroup_add_files(struct cgroup *cgrp, struct cgroup_subsys *subsys, const struct cftype cft[], int count) { int i, err; for (i = 0; i < count; i++) { - err = cgroup_add_file(cont, subsys, &cft[i]); + err = cgroup_add_file(cgrp, subsys, &cft[i]); if (err) return err; } return 0; } -/* Count the number of tasks in a cgroup. Could be made more - * time-efficient but less space-efficient with more linked lists - * running through each cgroup and the css_set structures that - * referenced it. Must be called with tasklist_lock held for read or - * write or in an rcu critical section. - */ -int __cgroup_task_count(const struct cgroup *cont) +/* Count the number of tasks in a cgroup. */ + +int cgroup_task_count(const struct cgroup *cgrp) { int count = 0; - struct task_struct *g, *p; - struct cgroup_subsys_state *css; - int subsys_id; - - get_first_subsys(cont, &css, &subsys_id); - do_each_thread(g, p) { - if (task_subsys_state(p, subsys_id) == css) - count ++; - } while_each_thread(g, p); + struct list_head *l; + + read_lock(&css_set_lock); + l = cgrp->css_sets.next; + while (l != &cgrp->css_sets) { + struct cg_cgroup_link *link = + list_entry(l, struct cg_cgroup_link, cgrp_link_list); + count += atomic_read(&link->cg->ref.refcount); + l = l->next; + } + read_unlock(&css_set_lock); return count; } +/* + * Advance a list_head iterator. 
The iterator should be positioned at + * the start of a css_set + */ +static void cgroup_advance_iter(struct cgroup *cgrp, + struct cgroup_iter *it) +{ + struct list_head *l = it->cg_link; + struct cg_cgroup_link *link; + struct css_set *cg; + + /* Advance to the next non-empty css_set */ + do { + l = l->next; + if (l == &cgrp->css_sets) { + it->cg_link = NULL; + return; + } + link = list_entry(l, struct cg_cgroup_link, cgrp_link_list); + cg = link->cg; + } while (list_empty(&cg->tasks)); + it->cg_link = l; + it->task = cg->tasks.next; +} + +void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it) +{ + /* + * The first time anyone tries to iterate across a cgroup, + * we need to enable the list linking each css_set to its + * tasks, and fix up all existing tasks. + */ + if (!use_task_css_set_links) { + struct task_struct *p, *g; + write_lock(&css_set_lock); + use_task_css_set_links = 1; + do_each_thread(g, p) { + task_lock(p); + if (list_empty(&p->cg_list)) + list_add(&p->cg_list, &p->cgroups->tasks); + task_unlock(p); + } while_each_thread(g, p); + write_unlock(&css_set_lock); + } + read_lock(&css_set_lock); + it->cg_link = &cgrp->css_sets; + cgroup_advance_iter(cgrp, it); +} + +struct task_struct *cgroup_iter_next(struct cgroup *cgrp, + struct cgroup_iter *it) +{ + struct task_struct *res; + struct list_head *l = it->task; + + /* If the iterator cg is NULL, we have no tasks */ + if (!it->cg_link) + return NULL; + res = list_entry(l, struct task_struct, cg_list); + /* Advance iterator to find next entry */ + l = l->next; + if (l == &res->cgroups->tasks) { + /* We reached the end of this task list - move on to + * the next cg_cgroup_link */ + cgroup_advance_iter(cgrp, it); + } else { + it->task = l; + } + return res; +} + +void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it) +{ + read_unlock(&css_set_lock); +} + /* * Stuff for reading the 'tasks' file. * @@ -1190,31 +1747,76 @@ struct ctr_struct { /* * Load into 'pidarray' up to 'npids' of the tasks using cgroup - * 'cont'. Return actual number of pids loaded. No need to + * 'cgrp'. Return actual number of pids loaded. No need to * task_lock(p) when reading out p->cgroup, since we're in an RCU * read section, so the css_set can't go away, and is * immutable after creation. */ -static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cont) +static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp) { int n = 0; - struct task_struct *g, *p; - struct cgroup_subsys_state *css; - int subsys_id; + struct cgroup_iter it; + struct task_struct *tsk; + cgroup_iter_start(cgrp, &it); + while ((tsk = cgroup_iter_next(cgrp, &it))) { + if (unlikely(n == npids)) + break; + pidarray[n++] = task_pid_nr(tsk); + } + cgroup_iter_end(cgrp, &it); + return n; +} - get_first_subsys(cont, &css, &subsys_id); +/** + * Build and fill cgroupstats so that taskstats can export it to user + * space. + * + * @stats: cgroupstats to fill information into + * @dentry: A dentry entry belonging to the cgroup for which stats have + * been requested. 
+ */ +int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) +{ + int ret = -EINVAL; + struct cgroup *cgrp; + struct cgroup_iter it; + struct task_struct *tsk; + /* + * Validate dentry by checking the superblock operations + */ + if (dentry->d_sb->s_op != &cgroup_ops) + goto err; + + ret = 0; + cgrp = dentry->d_fsdata; rcu_read_lock(); - do_each_thread(g, p) { - if (task_subsys_state(p, subsys_id) == css) { - pidarray[n++] = pid_nr(task_pid(p)); - if (unlikely(n == npids)) - goto array_full; + + cgroup_iter_start(cgrp, &it); + while ((tsk = cgroup_iter_next(cgrp, &it))) { + switch (tsk->state) { + case TASK_RUNNING: + stats->nr_running++; + break; + case TASK_INTERRUPTIBLE: + stats->nr_sleeping++; + break; + case TASK_UNINTERRUPTIBLE: + stats->nr_uninterruptible++; + break; + case TASK_STOPPED: + stats->nr_stopped++; + break; + default: + if (delayacct_is_task_waiting_on_io(tsk)) + stats->nr_io_wait++; + break; } - } while_each_thread(g, p); + } + cgroup_iter_end(cgrp, &it); -array_full: rcu_read_unlock(); - return n; +err: + return ret; } static int cmppid(const void *a, const void *b) @@ -1245,7 +1847,7 @@ static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids) */ static int cgroup_tasks_open(struct inode *unused, struct file *file) { - struct cgroup *cont = __d_cont(file->f_dentry->d_parent); + struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); struct ctr_struct *ctr; pid_t *pidarray; int npids; @@ -1264,13 +1866,13 @@ static int cgroup_tasks_open(struct inode *unused, struct file *file) * caller from the case that the additional cgroup users didn't * show up until sometime later on. */ - npids = cgroup_task_count(cont); + npids = cgroup_task_count(cgrp); if (npids) { pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL); if (!pidarray) goto err1; - npids = pid_array_load(pidarray, npids, cont); + npids = pid_array_load(pidarray, npids, cgrp); sort(pidarray, npids, sizeof(pid_t), cmppid, NULL); /* Call pid_array_to_buf() twice, first just to get bufsz */ @@ -1296,7 +1898,7 @@ err0: return -ENOMEM; } -static ssize_t cgroup_tasks_read(struct cgroup *cont, +static ssize_t cgroup_tasks_read(struct cgroup *cgrp, struct cftype *cft, struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) @@ -1319,32 +1921,70 @@ static int cgroup_tasks_release(struct inode *unused_inode, return 0; } +static u64 cgroup_read_notify_on_release(struct cgroup *cgrp, + struct cftype *cft) +{ + return notify_on_release(cgrp); +} + +static u64 cgroup_read_releasable(struct cgroup *cgrp, struct cftype *cft) +{ + return test_bit(CGRP_RELEASABLE, &cgrp->flags); +} + /* * for the common functions, 'private' gives the type of file */ -static struct cftype cft_tasks = { - .name = "tasks", - .open = cgroup_tasks_open, - .read = cgroup_tasks_read, +static struct cftype files[] = { + { + .name = "tasks", + .open = cgroup_tasks_open, + .read = cgroup_tasks_read, + .write = cgroup_common_file_write, + .release = cgroup_tasks_release, + .private = FILE_TASKLIST, + }, + + { + .name = "notify_on_release", + .read_uint = cgroup_read_notify_on_release, + .write = cgroup_common_file_write, + .private = FILE_NOTIFY_ON_RELEASE, + }, + + { + .name = "releasable", + .read_uint = cgroup_read_releasable, + .private = FILE_RELEASABLE, + } +}; + +static struct cftype cft_release_agent = { + .name = "release_agent", + .read = cgroup_common_file_read, .write = cgroup_common_file_write, - .release = cgroup_tasks_release, - .private = FILE_TASKLIST, + .private = FILE_RELEASE_AGENT, }; -static int 
cgroup_populate_dir(struct cgroup *cont) +static int cgroup_populate_dir(struct cgroup *cgrp) { int err; struct cgroup_subsys *ss; /* First clear out any existing files */ - cgroup_clear_directory(cont->dentry); + cgroup_clear_directory(cgrp->dentry); - err = cgroup_add_file(cont, NULL, &cft_tasks); + err = cgroup_add_files(cgrp, NULL, files, ARRAY_SIZE(files)); if (err < 0) return err; - for_each_subsys(cont->root, ss) { - if (ss->populate && (err = ss->populate(ss, cont)) < 0) + if (cgrp == cgrp->top_cgroup) { + if ((err = cgroup_add_file(cgrp, NULL, &cft_release_agent)) < 0) + return err; + } + + for_each_subsys(cgrp->root, ss) { + if (ss->populate && (err = ss->populate(ss, cgrp)) < 0) return err; } @@ -1353,15 +1993,15 @@ static int cgroup_populate_dir(struct cgroup *cont) static void init_cgroup_css(struct cgroup_subsys_state *css, struct cgroup_subsys *ss, - struct cgroup *cont) + struct cgroup *cgrp) { - css->cgroup = cont; + css->cgroup = cgrp; atomic_set(&css->refcnt, 0); css->flags = 0; - if (cont == dummytop) + if (cgrp == dummytop) set_bit(CSS_ROOT, &css->flags); - BUG_ON(cont->subsys[ss->subsys_id]); - cont->subsys[ss->subsys_id] = css; + BUG_ON(cgrp->subsys[ss->subsys_id]); + cgrp->subsys[ss->subsys_id] = css; } /* @@ -1376,14 +2016,14 @@ static void init_cgroup_css(struct cgroup_subsys_state *css, static long cgroup_create(struct cgroup *parent, struct dentry *dentry, int mode) { - struct cgroup *cont; + struct cgroup *cgrp; struct cgroupfs_root *root = parent->root; int err = 0; struct cgroup_subsys *ss; struct super_block *sb = root->sb; - cont = kzalloc(sizeof(*cont), GFP_KERNEL); - if (!cont) + cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL); + if (!cgrp) return -ENOMEM; /* Grab a reference on the superblock so the hierarchy doesn't @@ -1395,51 +2035,53 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, mutex_lock(&cgroup_mutex); - cont->flags = 0; - INIT_LIST_HEAD(&cont->sibling); - INIT_LIST_HEAD(&cont->children); + cgrp->flags = 0; + INIT_LIST_HEAD(&cgrp->sibling); + INIT_LIST_HEAD(&cgrp->children); + INIT_LIST_HEAD(&cgrp->css_sets); + INIT_LIST_HEAD(&cgrp->release_list); - cont->parent = parent; - cont->root = parent->root; - cont->top_cgroup = parent->top_cgroup; + cgrp->parent = parent; + cgrp->root = parent->root; + cgrp->top_cgroup = parent->top_cgroup; for_each_subsys(root, ss) { - struct cgroup_subsys_state *css = ss->create(ss, cont); + struct cgroup_subsys_state *css = ss->create(ss, cgrp); if (IS_ERR(css)) { err = PTR_ERR(css); goto err_destroy; } - init_cgroup_css(css, ss, cont); + init_cgroup_css(css, ss, cgrp); } - list_add(&cont->sibling, &cont->parent->children); + list_add(&cgrp->sibling, &cgrp->parent->children); root->number_of_cgroups++; - err = cgroup_create_dir(cont, dentry, mode); + err = cgroup_create_dir(cgrp, dentry, mode); if (err < 0) goto err_remove; /* The cgroup directory was pre-locked for us */ - BUG_ON(!mutex_is_locked(&cont->dentry->d_inode->i_mutex)); + BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex)); - err = cgroup_populate_dir(cont); + err = cgroup_populate_dir(cgrp); /* If err < 0, we have a half-filled directory - oh well ;) */ mutex_unlock(&cgroup_mutex); - mutex_unlock(&cont->dentry->d_inode->i_mutex); + mutex_unlock(&cgrp->dentry->d_inode->i_mutex); return 0; err_remove: - list_del(&cont->sibling); + list_del(&cgrp->sibling); root->number_of_cgroups--; err_destroy: for_each_subsys(root, ss) { - if (cont->subsys[ss->subsys_id]) - ss->destroy(ss, cont); + if (cgrp->subsys[ss->subsys_id]) + 
ss->destroy(ss, cgrp); } mutex_unlock(&cgroup_mutex); @@ -1447,7 +2089,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, /* Release the reference count that we took on the superblock */ deactivate_super(sb); - kfree(cont); + kfree(cgrp); return err; } @@ -1459,67 +2101,92 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode) return cgroup_create(c_parent, dentry, mode | S_IFDIR); } +static inline int cgroup_has_css_refs(struct cgroup *cgrp) +{ + /* Check the reference count on each subsystem. Since we + * already established that there are no tasks in the + * cgroup, if the css refcount is also 0, then there should + * be no outstanding references, so the subsystem is safe to + * destroy. We scan across all subsystems rather than using + * the per-hierarchy linked list of mounted subsystems since + * we can be called via check_for_release() with no + * synchronization other than RCU, and the subsystem linked + * list isn't RCU-safe */ + int i; + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + struct cgroup_subsys *ss = subsys[i]; + struct cgroup_subsys_state *css; + /* Skip subsystems not in this hierarchy */ + if (ss->root != cgrp->root) + continue; + css = cgrp->subsys[ss->subsys_id]; + /* When called from check_for_release() it's possible + * that by this point the cgroup has been removed + * and the css deleted. But a false-positive doesn't + * matter, since it can only happen if the cgroup + * has been deleted and hence no longer needs the + * release agent to be called anyway. */ + if (css && atomic_read(&css->refcnt)) { + return 1; + } + } + return 0; +} + static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) { - struct cgroup *cont = dentry->d_fsdata; + struct cgroup *cgrp = dentry->d_fsdata; struct dentry *d; struct cgroup *parent; struct cgroup_subsys *ss; struct super_block *sb; struct cgroupfs_root *root; - int css_busy = 0; /* the vfs holds both inode->i_mutex already */ mutex_lock(&cgroup_mutex); - if (atomic_read(&cont->count) != 0) { + if (atomic_read(&cgrp->count) != 0) { mutex_unlock(&cgroup_mutex); return -EBUSY; } - if (!list_empty(&cont->children)) { + if (!list_empty(&cgrp->children)) { mutex_unlock(&cgroup_mutex); return -EBUSY; } - parent = cont->parent; - root = cont->root; + parent = cgrp->parent; + root = cgrp->root; sb = root->sb; - /* Check the reference count on each subsystem. 
Since we - * already established that there are no tasks in the - * cgroup, if the css refcount is also 0, then there should - * be no outstanding references, so the subsystem is safe to - * destroy */ - for_each_subsys(root, ss) { - struct cgroup_subsys_state *css; - css = cont->subsys[ss->subsys_id]; - if (atomic_read(&css->refcnt)) { - css_busy = 1; - break; - } - } - if (css_busy) { + if (cgroup_has_css_refs(cgrp)) { mutex_unlock(&cgroup_mutex); return -EBUSY; } for_each_subsys(root, ss) { - if (cont->subsys[ss->subsys_id]) - ss->destroy(ss, cont); + if (cgrp->subsys[ss->subsys_id]) + ss->destroy(ss, cgrp); } - set_bit(CONT_REMOVED, &cont->flags); + spin_lock(&release_list_lock); + set_bit(CGRP_REMOVED, &cgrp->flags); + if (!list_empty(&cgrp->release_list)) + list_del(&cgrp->release_list); + spin_unlock(&release_list_lock); /* delete my sibling from parent->children */ - list_del(&cont->sibling); - spin_lock(&cont->dentry->d_lock); - d = dget(cont->dentry); - cont->dentry = NULL; + list_del(&cgrp->sibling); + spin_lock(&cgrp->dentry->d_lock); + d = dget(cgrp->dentry); + cgrp->dentry = NULL; spin_unlock(&d->d_lock); cgroup_d_remove_dir(d); dput(d); root->number_of_cgroups--; + set_bit(CGRP_RELEASABLE, &parent->flags); + check_for_release(parent); + mutex_unlock(&cgroup_mutex); /* Drop the active superblock reference that we took when we * created the cgroup */ @@ -1529,9 +2196,10 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) static void cgroup_init_subsys(struct cgroup_subsys *ss) { - struct task_struct *g, *p; struct cgroup_subsys_state *css; - printk(KERN_ERR "Initializing cgroup subsys %s\n", ss->name); + struct list_head *l; + + printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name); /* Create the top cgroup state for this subsystem */ ss->root = &rootnode; @@ -1540,26 +2208,32 @@ static void cgroup_init_subsys(struct cgroup_subsys *ss) BUG_ON(IS_ERR(css)); init_cgroup_css(css, ss, dummytop); - /* Update all tasks to contain a subsys pointer to this state - * - since the subsystem is newly registered, all tasks are in - * the subsystem's top cgroup. */ + /* Update all cgroup groups to contain a subsys + * pointer to this state - since the subsystem is + * newly registered, all tasks and hence all cgroup + * groups are in the subsystem's top cgroup. 
*/ + write_lock(&css_set_lock); + l = &init_css_set.list; + do { + struct css_set *cg = + list_entry(l, struct css_set, list); + cg->subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id]; + l = l->next; + } while (l != &init_css_set.list); + write_unlock(&css_set_lock); /* If this subsystem requested that it be notified with fork * events, we should send it one now for every process in the * system */ + if (ss->fork) { + struct task_struct *g, *p; - read_lock(&tasklist_lock); - init_task.cgroups.subsys[ss->subsys_id] = css; - if (ss->fork) - ss->fork(ss, &init_task); - - do_each_thread(g, p) { - printk(KERN_INFO "Setting task %p css to %p (%d)\n", css, p, p->pid); - p->cgroups.subsys[ss->subsys_id] = css; - if (ss->fork) + read_lock(&tasklist_lock); + do_each_thread(g, p) { ss->fork(ss, p); - } while_each_thread(g, p); - read_unlock(&tasklist_lock); + } while_each_thread(g, p); + read_unlock(&tasklist_lock); + } need_forkexit_callback |= ss->fork || ss->exit; @@ -1573,8 +2247,22 @@ static void cgroup_init_subsys(struct cgroup_subsys *ss) int __init cgroup_init_early(void) { int i; + kref_init(&init_css_set.ref); + kref_get(&init_css_set.ref); + INIT_LIST_HEAD(&init_css_set.list); + INIT_LIST_HEAD(&init_css_set.cg_links); + INIT_LIST_HEAD(&init_css_set.tasks); + css_set_count = 1; init_cgroup_root(&rootnode); list_add(&rootnode.root_list, &roots); + root_count = 1; + init_task.cgroups = &init_css_set; + + init_css_set_link.cg = &init_css_set; + list_add(&init_css_set_link.cgrp_link_list, + &rootnode.top_cgroup.css_sets); + list_add(&init_css_set_link.cg_link_list, + &init_css_set.cg_links); for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { struct cgroup_subsys *ss = subsys[i]; @@ -1584,7 +2272,7 @@ int __init cgroup_init_early(void) BUG_ON(!ss->create); BUG_ON(!ss->destroy); if (ss->subsys_id != i) { - printk(KERN_ERR "Subsys %s id == %d\n", + printk(KERN_ERR "cgroup: Subsys %s id == %d\n", ss->name, ss->subsys_id); BUG(); } @@ -1668,7 +2356,7 @@ static int proc_cgroup_show(struct seq_file *m, void *v) for_each_root(root) { struct cgroup_subsys *ss; - struct cgroup *cont; + struct cgroup *cgrp; int subsys_id; int count = 0; @@ -1679,8 +2367,8 @@ static int proc_cgroup_show(struct seq_file *m, void *v) seq_printf(m, "%s%s", count++ ? "," : "", ss->name); seq_putc(m, ':'); get_first_subsys(&root->top_cgroup, NULL, &subsys_id); - cont = task_cgroup(tsk, subsys_id); - retval = cgroup_path(cont, buf, PAGE_SIZE); + cgrp = task_cgroup(tsk, subsys_id); + retval = cgroup_path(cgrp, buf, PAGE_SIZE); if (retval < 0) goto out_unlock; seq_puts(m, buf); @@ -1713,31 +2401,14 @@ struct file_operations proc_cgroup_operations = { static int proc_cgroupstats_show(struct seq_file *m, void *v) { int i; - struct cgroupfs_root *root; + seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\n"); mutex_lock(&cgroup_mutex); - seq_puts(m, "Hierarchies:\n"); - for_each_root(root) { - struct cgroup_subsys *ss; - int first = 1; - seq_printf(m, "%p: bits=%lx cgroups=%d (", root, - root->subsys_bits, root->number_of_cgroups); - for_each_subsys(root, ss) { - seq_printf(m, "%s%s", first ? 
"" : ", ", ss->name); - first = false; - } - seq_putc(m, ')'); - if (root->sb) { - seq_printf(m, " s_active=%d", - atomic_read(&root->sb->s_active)); - } - seq_putc(m, '\n'); - } - seq_puts(m, "Subsystems:\n"); for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { struct cgroup_subsys *ss = subsys[i]; - seq_printf(m, "%d: name=%s hierarchy=%p\n", - i, ss->name, ss->root); + seq_printf(m, "%s\t%lu\t%d\n", + ss->name, ss->root->subsys_bits, + ss->root->number_of_cgroups); } mutex_unlock(&cgroup_mutex); return 0; @@ -1765,18 +2436,19 @@ static struct file_operations proc_cgroupstats_operations = { * fork.c by dup_task_struct(). However, we ignore that copy, since * it was not made under the protection of RCU or cgroup_mutex, so * might no longer be a valid cgroup pointer. attach_task() might - * have already changed current->cgroup, allowing the previously - * referenced cgroup to be removed and freed. + * have already changed current->cgroups, allowing the previously + * referenced cgroup group to be removed and freed. * * At the point that cgroup_fork() is called, 'current' is the parent * task, and the passed argument 'child' points to the child task. */ void cgroup_fork(struct task_struct *child) { - rcu_read_lock(); - child->cgroups = rcu_dereference(current->cgroups); - get_css_set(&child->cgroups); - rcu_read_unlock(); + task_lock(current); + child->cgroups = current->cgroups; + get_css_set(child->cgroups); + task_unlock(current); + INIT_LIST_HEAD(&child->cg_list); } /** @@ -1796,6 +2468,21 @@ void cgroup_fork_callbacks(struct task_struct *child) } } +/** + * cgroup_post_fork - called on a new task after adding it to the + * task list. Adds the task to the list running through its css_set + * if necessary. Has to be after the task is visible on the task list + * in case we race with the first call to cgroup_iter_start() - to + * guarantee that the new task ends up on its list. */ +void cgroup_post_fork(struct task_struct *child) +{ + if (use_task_css_set_links) { + write_lock(&css_set_lock); + if (list_empty(&child->cg_list)) + list_add(&child->cg_list, &child->cgroups->tasks); + write_unlock(&css_set_lock); + } +} /** * cgroup_exit - detach cgroup from exiting task * @tsk: pointer to task_struct of exiting process @@ -1834,6 +2521,7 @@ void cgroup_fork_callbacks(struct task_struct *child) void cgroup_exit(struct task_struct *tsk, int run_callbacks) { int i; + struct css_set *cg; if (run_callbacks && need_forkexit_callback) { for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { @@ -1842,11 +2530,26 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) ss->exit(ss, tsk); } } + + /* + * Unlink from the css_set task list if necessary. + * Optimistically check cg_list before taking + * css_set_lock + */ + if (!list_empty(&tsk->cg_list)) { + write_lock(&css_set_lock); + if (!list_empty(&tsk->cg_list)) + list_del(&tsk->cg_list); + write_unlock(&css_set_lock); + } + /* Reassign the task to the init_css_set. 
*/ task_lock(tsk); - put_css_set(&tsk->cgroups); - tsk->cgroups = init_task.cgroups; + cg = tsk->cgroups; + tsk->cgroups = &init_css_set; task_unlock(tsk); + if (cg) + put_css_set_taskexit(cg); } /** @@ -1880,7 +2583,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) mutex_unlock(&cgroup_mutex); return 0; } - cg = &tsk->cgroups; + cg = tsk->cgroups; parent = task_cgroup(tsk, subsys->subsys_id); snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "node_%d", tsk->pid); @@ -1888,6 +2591,8 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) /* Pin the hierarchy */ atomic_inc(&parent->root->sb->s_active); + /* Keep the cgroup alive */ + get_css_set(cg); mutex_unlock(&cgroup_mutex); /* Now do the VFS work to create a cgroup */ @@ -1899,7 +2604,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename)); if (IS_ERR(dentry)) { printk(KERN_INFO - "Couldn't allocate dentry for %s: %ld\n", nodename, + "cgroup: Couldn't allocate dentry for %s: %ld\n", nodename, PTR_ERR(dentry)); ret = PTR_ERR(dentry); goto out_release; @@ -1907,7 +2612,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) /* Create the cgroup directory, which also creates the cgroup */ ret = vfs_mkdir(inode, dentry, S_IFDIR | 0755); - child = __d_cont(dentry); + child = __d_cgrp(dentry); dput(dentry); if (ret) { printk(KERN_INFO @@ -1931,6 +2636,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) (parent != task_cgroup(tsk, subsys->subsys_id))) { /* Aargh, we raced ... */ mutex_unlock(&inode->i_mutex); + put_css_set(cg); deactivate_super(parent->root->sb); /* The cgroup is still accessible in the VFS, but @@ -1954,12 +2660,16 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) out_release: mutex_unlock(&inode->i_mutex); + + mutex_lock(&cgroup_mutex); + put_css_set(cg); + mutex_unlock(&cgroup_mutex); deactivate_super(parent->root->sb); return ret; } /* - * See if "cont" is a descendant of the current task's cgroup in + * See if "cgrp" is a descendant of the current task's cgroup in * the appropriate hierarchy * * If we are sending in dummytop, then presumably we are creating @@ -1967,19 +2677,127 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) * * Called only by the ns (nsproxy) cgroup. */ -int cgroup_is_descendant(const struct cgroup *cont) +int cgroup_is_descendant(const struct cgroup *cgrp) { int ret; struct cgroup *target; int subsys_id; - if (cont == dummytop) + if (cgrp == dummytop) return 1; - get_first_subsys(cont, NULL, &subsys_id); + get_first_subsys(cgrp, NULL, &subsys_id); target = task_cgroup(current, subsys_id); - while (cont != target && cont!= cont->top_cgroup) - cont = cont->parent; - ret = (cont == target); + while (cgrp != target && cgrp!= cgrp->top_cgroup) + cgrp = cgrp->parent; + ret = (cgrp == target); return ret; } + +static void check_for_release(struct cgroup *cgrp) +{ + /* All of these checks rely on RCU to keep the cgroup + * structure alive */ + if (cgroup_is_releasable(cgrp) && !atomic_read(&cgrp->count) + && list_empty(&cgrp->children) && !cgroup_has_css_refs(cgrp)) { + /* Control Group is currently removeable. 
If it's not + * already queued for a userspace notification, queue + * it now */ + int need_schedule_work = 0; + spin_lock(&release_list_lock); + if (!cgroup_is_removed(cgrp) && + list_empty(&cgrp->release_list)) { + list_add(&cgrp->release_list, &release_list); + need_schedule_work = 1; + } + spin_unlock(&release_list_lock); + if (need_schedule_work) + schedule_work(&release_agent_work); + } +} + +void __css_put(struct cgroup_subsys_state *css) +{ + struct cgroup *cgrp = css->cgroup; + rcu_read_lock(); + if (atomic_dec_and_test(&css->refcnt) && notify_on_release(cgrp)) { + set_bit(CGRP_RELEASABLE, &cgrp->flags); + check_for_release(cgrp); + } + rcu_read_unlock(); +} + +/* + * Notify userspace when a cgroup is released, by running the + * configured release agent with the name of the cgroup (path + * relative to the root of cgroup file system) as the argument. + * + * Most likely, this user command will try to rmdir this cgroup. + * + * This races with the possibility that some other task will be + * attached to this cgroup before it is removed, or that some other + * user task will 'mkdir' a child cgroup of this cgroup. That's ok. + * The presumed 'rmdir' will fail quietly if this cgroup is no longer + * unused, and this cgroup will be reprieved from its death sentence, + * to continue to serve a useful existence. Next time it's released, + * we will get notified again, if it still has 'notify_on_release' set. + * + * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which + * means only wait until the task is successfully execve()'d. The + * separate release agent task is forked by call_usermodehelper(), + * then control in this thread returns here, without waiting for the + * release agent task. We don't bother to wait because the caller of + * this routine has no use for the exit status of the release agent + * task, so no sense holding our caller up for that. + * + */ + +static void cgroup_release_agent(struct work_struct *work) +{ + BUG_ON(work != &release_agent_work); + mutex_lock(&cgroup_mutex); + spin_lock(&release_list_lock); + while (!list_empty(&release_list)) { + char *argv[3], *envp[3]; + int i; + char *pathbuf; + struct cgroup *cgrp = list_entry(release_list.next, + struct cgroup, + release_list); + list_del_init(&cgrp->release_list); + spin_unlock(&release_list_lock); + pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!pathbuf) { + spin_lock(&release_list_lock); + continue; + } + + if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0) { + kfree(pathbuf); + spin_lock(&release_list_lock); + continue; + } + + i = 0; + argv[i++] = cgrp->root->release_agent_path; + argv[i++] = (char *)pathbuf; + argv[i] = NULL; + + i = 0; + /* minimal command environment */ + envp[i++] = "HOME=/"; + envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; + envp[i] = NULL; + + /* Drop the lock while we invoke the usermode helper, + * since the exec could involve hitting disk and hence + * be a slow process */ + mutex_unlock(&cgroup_mutex); + call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); + kfree(pathbuf); + mutex_lock(&cgroup_mutex); + spin_lock(&release_list_lock); + } + spin_unlock(&release_list_lock); + mutex_unlock(&cgroup_mutex); +}
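
The patch replaces the old "walk every thread and compare css pointers" loops with the cgroup_iter_start()/cgroup_iter_next()/cgroup_iter_end() API built on the per-cgroup css_set links. A minimal sketch of how a caller might use it, modelled on cgroupstats_build() in the patch (the function name and the running-task count are made up for illustration; the iterator holds css_set_lock for reading between start and end, so the loop body must not sleep):

#include <linux/cgroup.h>
#include <linux/sched.h>

/* Count the runnable tasks attached to a cgroup (illustrative only). */
static int count_running_tasks(struct cgroup *cgrp)
{
	struct cgroup_iter it;
	struct task_struct *tsk;
	int nr = 0;

	cgroup_iter_start(cgrp, &it);
	while ((tsk = cgroup_iter_next(cgrp, &it))) {
		if (tsk->state == TASK_RUNNING)
			nr++;
	}
	cgroup_iter_end(cgrp, &it);
	return nr;
}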
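
The release-notification path added here (check_for_release(), cgroup_release_agent()) execs the binary configured in release_agent_path with a single argument: the path of the now-empty cgroup relative to the hierarchy root. A minimal userspace agent, sketched under the assumption that the hierarchy is mounted on /dev/cgroup (the agent itself is not part of this patch), would simply remove that directory:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	char path[4096];

	if (argc != 2) {
		fprintf(stderr, "usage: %s <cgroup-path>\n", argv[0]);
		return 1;
	}
	/* /dev/cgroup is an assumed mount point for this sketch. */
	if (snprintf(path, sizeof(path), "/dev/cgroup%s", argv[1]) >=
	    (int)sizeof(path))
		return 1;
	/*
	 * EBUSY/ENOTEMPTY only mean the cgroup was reused before the
	 * agent ran; the kernel notifies again the next time the
	 * cgroup becomes releasable.
	 */
	if (rmdir(path) < 0 && errno != ENOENT && errno != EBUSY &&
	    errno != ENOTEMPTY) {
		perror(path);
		return 1;
	}
	return 0;
}

Notification is enabled per-cgroup by writing "1" to the notify_on_release file, and the agent path is set per-hierarchy either through the release_agent file in the root cgroup or the release_agent= mount option that parse_cgroupfs_options() now accepts.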