X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=kernel%2Fcpuset.c;h=28176d083f7baadca422dfd510fdd7dbfd892c00;hb=e0f998930eb67c49f2862c58a45262ad0bc03eca;hp=8ab1b4e518b8909a867595534957dbf3c12cf3d8;hpb=7ee175f7418cfb17872afd8ffca767ab0e75aff3;p=linux-2.6

diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 8ab1b4e518..28176d083f 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -180,6 +180,42 @@ static struct super_block *cpuset_sb = NULL;
  */

 static DECLARE_MUTEX(cpuset_sem);
+static struct task_struct *cpuset_sem_owner;
+static int cpuset_sem_depth;
+
+/*
+ * The global cpuset semaphore cpuset_sem can be needed by the
+ * memory allocator to update a tasks mems_allowed (see the calls
+ * to cpuset_update_current_mems_allowed()) or to walk up the
+ * cpuset hierarchy to find a mem_exclusive cpuset see the calls
+ * to cpuset_excl_nodes_overlap()).
+ *
+ * But if the memory allocation is being done by cpuset.c code, it
+ * usually already holds cpuset_sem. Double tripping on a kernel
+ * semaphore deadlocks the current task, and any other task that
+ * subsequently tries to obtain the lock.
+ *
+ * Run all up's and down's on cpuset_sem through the following
+ * wrappers, which will detect this nested locking, and avoid
+ * deadlocking.
+ */
+
+static inline void cpuset_down(struct semaphore *psem)
+{
+	if (cpuset_sem_owner != current) {
+		down(psem);
+		cpuset_sem_owner = current;
+	}
+	cpuset_sem_depth++;
+}
+
+static inline void cpuset_up(struct semaphore *psem)
+{
+	if (--cpuset_sem_depth == 0) {
+		cpuset_sem_owner = NULL;
+		up(psem);
+	}
+}

 /*
  * A couple of forward declarations required, due to cyclic reference loop:
@@ -522,19 +558,10 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
  * Refresh current tasks mems_allowed and mems_generation from
  * current tasks cpuset. Call with cpuset_sem held.
  *
- * Be sure to call refresh_mems() on any cpuset operation which
- * (1) holds cpuset_sem, and (2) might possibly alloc memory.
- * Call after obtaining cpuset_sem lock, before any possible
- * allocation. Otherwise one risks trying to allocate memory
- * while the task cpuset_mems_generation is not the same as
- * the mems_generation in its cpuset, which would deadlock on
- * cpuset_sem in cpuset_update_current_mems_allowed().
- *
- * Since we hold cpuset_sem, once refresh_mems() is called, the
- * test (current->cpuset_mems_generation != cs->mems_generation)
- * in cpuset_update_current_mems_allowed() will remain false,
- * until we drop cpuset_sem. Anyone else who would change our
- * cpusets mems_generation needs to lock cpuset_sem first.
+ * This routine is needed to update the per-task mems_allowed
+ * data, within the tasks context, when it is trying to allocate
+ * memory (in various mm/mempolicy.c routines) and notices
+ * that some other task has been modifying its cpuset.
  */

 static void refresh_mems(void)
@@ -628,13 +655,6 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
  * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
  */

-/*
- * Hack to avoid 2.6.13 partial node dynamic sched domain bug.
- * Disable letting 'cpu_exclusive' cpusets define dynamic sched
- * domains, until the sched domain can handle partial nodes.
- * Remove this #if hackery when sched domains fixed.
- */
-#if 0
 static void update_cpu_domains(struct cpuset *cur)
 {
 	struct cpuset *c, *par = cur->parent;
@@ -675,11 +695,6 @@ static void update_cpu_domains(struct cpuset *cur)
 	partition_sched_domains(&pspan, &cspan);
 	unlock_cpu_hotplug();
 }
-#else
-static void update_cpu_domains(struct cpuset *cur)
-{
-}
-#endif

 static int update_cpumask(struct cpuset *cs, char *buf)
 {
@@ -852,7 +867,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
 	}
 	buffer[nbytes] = 0;	/* nul-terminate */

-	down(&cpuset_sem);
+	cpuset_down(&cpuset_sem);

 	if (is_removed(cs)) {
 		retval = -ENODEV;
@@ -886,7 +901,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
 	if (retval == 0)
 		retval = nbytes;
 out2:
-	up(&cpuset_sem);
+	cpuset_up(&cpuset_sem);
 	cpuset_release_agent(pathbuf);
 out1:
 	kfree(buffer);
@@ -926,9 +941,9 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
 {
 	cpumask_t mask;

-	down(&cpuset_sem);
+	cpuset_down(&cpuset_sem);
 	mask = cs->cpus_allowed;
-	up(&cpuset_sem);
+	cpuset_up(&cpuset_sem);

 	return cpulist_scnprintf(page, PAGE_SIZE, mask);
 }
@@ -937,9 +952,9 @@ static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
 {
 	nodemask_t mask;

-	down(&cpuset_sem);
+	cpuset_down(&cpuset_sem);
 	mask = cs->mems_allowed;
-	up(&cpuset_sem);
+	cpuset_up(&cpuset_sem);

 	return nodelist_scnprintf(page, PAGE_SIZE, mask);
 }
@@ -953,8 +968,6 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
 	char *page;
 	ssize_t retval = 0;
 	char *s;
-	char *start;
-	size_t n;

 	if (!(page = (char *)__get_free_page(GFP_KERNEL)))
 		return -ENOMEM;
@@ -984,10 +997,7 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
 	*s++ = '\n';
 	*s = '\0';

-	start = page + *ppos;
-	n = s - start;
-	retval = n - copy_to_user(buf, start, min(n, nbytes));
-	*ppos += retval;
+	retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
 out:
 	free_page((unsigned long)page);
 	return retval;
@@ -1342,8 +1352,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
 	if (!cs)
 		return -ENOMEM;

-	down(&cpuset_sem);
-	refresh_mems();
+	cpuset_down(&cpuset_sem);
 	cs->flags = 0;
 	if (notify_on_release(parent))
 		set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
@@ -1368,14 +1377,14 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
 	 * will down() this new directory's i_sem and if we race with
 	 * another mkdir, we might deadlock.
 	 */
-	up(&cpuset_sem);
+	cpuset_up(&cpuset_sem);

 	err = cpuset_populate_dir(cs->dentry);
 	/* If err < 0, we have a half-filled directory - oh well ;) */
 	return 0;
 err:
 	list_del(&cs->sibling);
-	up(&cpuset_sem);
+	cpuset_up(&cpuset_sem);
 	kfree(cs);
 	return err;
 }
@@ -1397,14 +1406,13 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)

 	/* the vfs holds both inode->i_sem already */

-	down(&cpuset_sem);
-	refresh_mems();
+	cpuset_down(&cpuset_sem);
 	if (atomic_read(&cs->count) > 0) {
-		up(&cpuset_sem);
+		cpuset_up(&cpuset_sem);
 		return -EBUSY;
 	}
 	if (!list_empty(&cs->children)) {
-		up(&cpuset_sem);
+		cpuset_up(&cpuset_sem);
 		return -EBUSY;
 	}
 	parent = cs->parent;
@@ -1420,7 +1428,7 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
 	spin_unlock(&d->d_lock);
 	cpuset_d_remove_dir(d);
 	dput(d);
-	up(&cpuset_sem);
+	cpuset_up(&cpuset_sem);
 	cpuset_release_agent(pathbuf);
 	return 0;
 }
@@ -1523,10 +1531,10 @@ void cpuset_exit(struct task_struct *tsk)
 	if (notify_on_release(cs)) {
 		char *pathbuf = NULL;

-		down(&cpuset_sem);
+		cpuset_down(&cpuset_sem);
 		if (atomic_dec_and_test(&cs->count))
 			check_for_release(cs, &pathbuf);
-		up(&cpuset_sem);
+		cpuset_up(&cpuset_sem);
 		cpuset_release_agent(pathbuf);
 	} else {
 		atomic_dec(&cs->count);
@@ -1547,11 +1555,11 @@ cpumask_t cpuset_cpus_allowed(const struct task_struct *tsk)
 {
 	cpumask_t mask;

-	down(&cpuset_sem);
+	cpuset_down(&cpuset_sem);
 	task_lock((struct task_struct *)tsk);
 	guarantee_online_cpus(tsk->cpuset, &mask);
 	task_unlock((struct task_struct *)tsk);
-	up(&cpuset_sem);
+	cpuset_up(&cpuset_sem);

 	return mask;
 }
@@ -1576,9 +1584,9 @@ void cpuset_update_current_mems_allowed(void)
 	if (!cs)
 		return;		/* task is exiting */
 	if (current->cpuset_mems_generation != cs->mems_generation) {
-		down(&cpuset_sem);
+		cpuset_down(&cpuset_sem);
 		refresh_mems();
-		up(&cpuset_sem);
+		cpuset_up(&cpuset_sem);
 	}
 }
@@ -1611,17 +1619,114 @@ int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
 	return 0;
 }

+/*
+ * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive
+ * ancestor to the specified cpuset. Call while holding cpuset_sem.
+ * If no ancestor is mem_exclusive (an unusual configuration), then
+ * returns the root cpuset.
+ */
+static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
+{
+	while (!is_mem_exclusive(cs) && cs->parent)
+		cs = cs->parent;
+	return cs;
+}
+
 /**
- * cpuset_zone_allowed - is zone z allowed in current->mems_allowed
- * @z: zone in question
+ * cpuset_zone_allowed - Can we allocate memory on zone z's memory node?
+ * @z: is this zone on an allowed node?
+ * @gfp_mask: memory allocation flags (we use __GFP_HARDWALL)
  *
- * Is zone z allowed in current->mems_allowed, or is
- * the CPU in interrupt context? (zone is always allowed in this case)
- */
-int cpuset_zone_allowed(struct zone *z)
+ * If we're in interrupt, yes, we can always allocate. If zone
+ * z's node is in our tasks mems_allowed, yes. If it's not a
+ * __GFP_HARDWALL request and this zone's nodes is in the nearest
+ * mem_exclusive cpuset ancestor to this tasks cpuset, yes.
+ * Otherwise, no.
+ *
+ * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
+ * and do not allow allocations outside the current tasks cpuset.
+ * GFP_KERNEL allocations are not so marked, so can escape to the
+ * nearest mem_exclusive ancestor cpuset.
+ *
+ * Scanning up parent cpusets requires cpuset_sem. The __alloc_pages()
+ * routine only calls here with __GFP_HARDWALL bit _not_ set if
+ * it's a GFP_KERNEL allocation, and all nodes in the current tasks
+ * mems_allowed came up empty on the first pass over the zonelist.
+ * So only GFP_KERNEL allocations, if all nodes in the cpuset are
+ * short of memory, might require taking the cpuset_sem semaphore.
+ *
+ * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages()
+ * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing
+ * hardwall cpusets - no allocation on a node outside the cpuset is
+ * allowed (unless in interrupt, of course).
+ *
+ * The second loop doesn't even call here for GFP_ATOMIC requests
+ * (if the __alloc_pages() local variable 'wait' is set). That check
+ * and the checks below have the combined affect in the second loop of
+ * the __alloc_pages() routine that:
+ *	in_interrupt - any node ok (current task context irrelevant)
+ *	GFP_ATOMIC   - any node ok
+ *	GFP_KERNEL   - any node in enclosing mem_exclusive cpuset ok
+ *	GFP_USER     - only nodes in current tasks mems allowed ok.
+ **/
+
+int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+{
+	int node;			/* node that zone z is on */
+	const struct cpuset *cs;	/* current cpuset ancestors */
+	int allowed = 1;		/* is allocation in zone z allowed? */
+
+	if (in_interrupt())
+		return 1;
+	node = z->zone_pgdat->node_id;
+	if (node_isset(node, current->mems_allowed))
+		return 1;
+	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
+		return 0;
+
+	/* Not hardwall and node outside mems_allowed: scan up cpusets */
+	cpuset_down(&cpuset_sem);
+	cs = current->cpuset;
+	if (!cs)
+		goto done;		/* current task exiting */
+	cs = nearest_exclusive_ancestor(cs);
+	allowed = node_isset(node, cs->mems_allowed);
done:
+	cpuset_up(&cpuset_sem);
+	return allowed;
+}
+
+/**
+ * cpuset_excl_nodes_overlap - Do we overlap @p's mem_exclusive ancestors?
+ * @p: pointer to task_struct of some other task.
+ *
+ * Description: Return true if the nearest mem_exclusive ancestor
+ * cpusets of tasks @p and current overlap. Used by oom killer to
+ * determine if task @p's memory usage might impact the memory
+ * available to the current task.
+ *
+ * Acquires cpuset_sem - not suitable for calling from a fast path.
+ **/
+
+int cpuset_excl_nodes_overlap(const struct task_struct *p)
 {
-	return in_interrupt() ||
-		node_isset(z->zone_pgdat->node_id, current->mems_allowed);
+	const struct cpuset *cs1, *cs2;	/* my and p's cpuset ancestors */
+	int overlap = 0;		/* do cpusets overlap? */
+
+	cpuset_down(&cpuset_sem);
+	cs1 = current->cpuset;
+	if (!cs1)
+		goto done;		/* current task exiting */
+	cs2 = p->cpuset;
+	if (!cs2)
+		goto done;		/* task p is exiting */
+	cs1 = nearest_exclusive_ancestor(cs1);
+	cs2 = nearest_exclusive_ancestor(cs2);
+	overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
done:
+	cpuset_up(&cpuset_sem);
+
+	return overlap;
 }

 /*
@@ -1642,7 +1747,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v)
 		return -ENOMEM;

 	tsk = m->private;
-	down(&cpuset_sem);
+	cpuset_down(&cpuset_sem);
 	task_lock(tsk);
 	cs = tsk->cpuset;
 	task_unlock(tsk);
@@ -1657,7 +1762,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v)
 	seq_puts(m, buf);
 	seq_putc(m, '\n');
 out:
-	up(&cpuset_sem);
+	cpuset_up(&cpuset_sem);
 	kfree(buf);
 	return retval;
 }
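
A note on the locking pattern in the first hunk: cpuset_down() and cpuset_up() avoid self-deadlock on cpuset_sem by recording the owning task and a nesting depth, so a task that already holds the semaphore just bumps the depth instead of calling down() a second time. The sketch below is not part of the patch; it is a hypothetical userspace rendering of the same owner/depth idea using POSIX threads so it can be compiled and run on its own. The names big_lock, rec_down() and rec_up() are invented for the illustration.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t big_lock_owner;	/* meaningful only while big_lock_depth > 0 */
static int big_lock_depth;

/*
 * Acquire big_lock unless the calling thread already holds it.
 * Mirrors cpuset_down(): the owner field can only compare equal to
 * the caller if the caller itself stored it while holding the lock.
 */
static void rec_down(void)
{
	if (big_lock_depth == 0 || !pthread_equal(big_lock_owner, pthread_self())) {
		pthread_mutex_lock(&big_lock);
		big_lock_owner = pthread_self();
	}
	big_lock_depth++;
}

/* Release big_lock only when the outermost holder lets go; mirrors cpuset_up(). */
static void rec_up(void)
{
	if (--big_lock_depth == 0)
		pthread_mutex_unlock(&big_lock);
}

/* A callee that takes the lock again, as the allocator paths do with cpuset_sem. */
static void nested_callee(void)
{
	rec_down();
	printf("nested depth: %d\n", big_lock_depth);	/* prints 2 */
	rec_up();
}

int main(void)
{
	rec_down();		/* outer acquisition, depth 1 */
	nested_callee();	/* would deadlock here with a plain non-recursive lock */
	rec_up();		/* depth back to 0, lock released */
	printf("final depth: %d\n", big_lock_depth);	/* prints 0 */
	return 0;
}

As with the kernel wrappers, this only works because only the lock holder ever writes the owner field, so a thread that does not hold the lock can never see its own identity there and simply blocks in pthread_mutex_lock() the ordinary way.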