}
/*
- * Increment this atomic integer everytime any cpuset changes its
+ * Increment this integer every time any cpuset changes its
* mems_allowed value. Users of cpusets can track this generation
* number, and avoid having to lock and reload mems_allowed unless
* the cpuset they're using changes generation.
* on every visit to __alloc_pages(), to efficiently check whether
* its current->cpuset->mems_allowed has changed, requiring an update
* of its current->mems_allowed.
+ *
+ * Since cpuset_mems_generation is guarded by manage_mutex,
+ * there is no need to mark it atomic.
*/
-static atomic_t cpuset_mems_generation = ATOMIC_INIT(1);
+static int cpuset_mems_generation;
static struct cpuset top_cpuset = {
.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
* current->cpuset if a task has its memory placement changed.
* Do not call this routine if in_interrupt().
*
- * Call without callback_mutex or task_lock() held. May be called
- * with or without manage_mutex held. Doesn't need task_lock to guard
- * against another task changing a non-NULL cpuset pointer to NULL,
- * as that is only done by a task on itself, and if the current task
- * is here, it is not simultaneously in the exit code NULL'ing its
- * cpuset pointer. This routine also might acquire callback_mutex and
+ * Call without callback_mutex or task_lock() held. May be
+ * called with or without manage_mutex held. Thanks in part to
+ * 'the_top_cpuset_hack', the task's cpuset pointer will never
+ * be NULL. This routine also might acquire callback_mutex and
* current->mm->mmap_sem during call.
*
* Reading current->cpuset->mems_generation doesn't need task_lock
return 0;
}
+/*
+ * cpuset_migrate_mm
+ *
+ * Migrate memory region from one set of nodes to another.
+ *
+ * Temporarily set the task's mems_allowed to the target nodes of the
+ * migration, so that the migration code can allocate pages on these nodes.
+ *
+ * Call holding manage_mutex, so our current->cpuset won't change
+ * during this call, as manage_mutex holds off any attach_task()
+ * calls. Therefore we don't need to take task_lock around the
+ * call to guarantee_online_mems(), as we know no one is changing
+ * our task's cpuset.
+ *
+ * Hold callback_mutex around the two modifications of our task's
+ * mems_allowed to synchronize with cpuset_mems_allowed().
+ *
+ * While the mm_struct we are migrating is typically from some
+ * other task, the mems_allowed we are hacking belongs to our
+ * current task, which must allocate new pages for that
+ * migrating memory region.
+ *
+ * We call cpuset_update_task_memory_state() before hacking
+ * our task's mems_allowed, so that we are assured of being in
+ * sync with our task's cpuset, and in particular, callbacks to
+ * cpuset_update_task_memory_state() from nested page allocations
+ * won't see any mismatch of our cpuset and task mems_generation
+ * values, so won't overwrite our hacked task's mems_allowed
+ * nodemask.
+ */
+
+static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
+ const nodemask_t *to)
+{
+ struct task_struct *tsk = current;
+
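+	/* Sync mems_allowed with our cpuset before we hack it (see above) */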
+ cpuset_update_task_memory_state();
+
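+	/* Temporarily allow this task to allocate pages on the target nodes */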
+ mutex_lock(&callback_mutex);
+ tsk->mems_allowed = *to;
+ mutex_unlock(&callback_mutex);
+
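+	/* MPOL_MF_MOVE_ALL: move even pages shared with other tasks */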
+ do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
+
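+	/* Restore mems_allowed to the online nodes allowed by our cpuset */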
+ mutex_lock(&callback_mutex);
+ guarantee_online_mems(tsk->cpuset, &tsk->mems_allowed);
+ mutex_unlock(&callback_mutex);
+}
+
/*
* Handle user request to change the 'mems' memory placement
* of a cpuset. Needs to validate the request, update the
mutex_lock(&callback_mutex);
cs->mems_allowed = trialcs.mems_allowed;
- cs->mems_generation = atomic_inc_return(&cpuset_mems_generation);
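+	/* Non-atomic increment is safe: cpuset_mems_generation is guarded
+	 * by manage_mutex, which we hold here. */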
+ cs->mems_generation = cpuset_mems_generation++;
mutex_unlock(&callback_mutex);
set_cpuset_being_rebound(cs); /* causes mpol_copy() rebind */
struct mm_struct *mm = mmarray[i];
mpol_rebind_mm(mm, &cs->mems_allowed);
- if (migrate) {
- do_migrate_pages(mm, &oldmem, &cs->mems_allowed,
- MPOL_MF_MOVE_ALL);
- }
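+		/* Also move the mm's existing pages to the new mems_allowed nodes */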
+ if (migrate)
+ cpuset_migrate_mm(mm, &oldmem, &cs->mems_allowed);
mmput(mm);
}
mm = get_task_mm(tsk);
if (mm) {
mpol_rebind_mm(mm, &to);
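+		/* Migrate while we still hold a reference on this mm */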
+ if (is_memory_migrate(cs))
+ cpuset_migrate_mm(mm, &from, &to);
mmput(mm);
}
- if (is_memory_migrate(cs))
- do_migrate_pages(tsk->mm, &from, &to, MPOL_MF_MOVE_ALL);
put_task_struct(tsk);
synchronize_rcu();
if (atomic_dec_and_test(&oldcs->count))
break;
case FILE_SPREAD_PAGE:
retval = update_flag(CS_SPREAD_PAGE, cs, buffer);
- cs->mems_generation = atomic_inc_return(&cpuset_mems_generation);
+ cs->mems_generation = cpuset_mems_generation++;
break;
case FILE_SPREAD_SLAB:
retval = update_flag(CS_SPREAD_SLAB, cs, buffer);
- cs->mems_generation = atomic_inc_return(&cpuset_mems_generation);
+ cs->mems_generation = cpuset_mems_generation++;
break;
case FILE_TASKLIST:
retval = attach_task(cs, buffer, &pathbuf);
atomic_set(&cs->count, 0);
INIT_LIST_HEAD(&cs->sibling);
INIT_LIST_HEAD(&cs->children);
- cs->mems_generation = atomic_inc_return(&cpuset_mems_generation);
+ cs->mems_generation = cpuset_mems_generation++;
fmeter_init(&cs->fmeter);
cs->parent = parent;
struct task_struct *tsk = current;
tsk->cpuset = &top_cpuset;
- tsk->cpuset->mems_generation = atomic_inc_return(&cpuset_mems_generation);
+ tsk->cpuset->mems_generation = cpuset_mems_generation++;
return 0;
}
top_cpuset.mems_allowed = NODE_MASK_ALL;
fmeter_init(&top_cpuset.fmeter);
- top_cpuset.mems_generation = atomic_inc_return(&cpuset_mems_generation);
+ top_cpuset.mems_generation = cpuset_mems_generation++;
init_task.cpuset = &top_cpuset;
* because tsk is already marked PF_EXITING, so attach_task() won't
* mess with it, or task is a failed fork, never visible to attach_task.
*
- * Hack:
+ * the_top_cpuset_hack:
*
* Set the exiting tasks cpuset to the root cpuset (top_cpuset).
*
struct cpuset *cs;
cs = tsk->cpuset;
- tsk->cpuset = &top_cpuset; /* Hack - see comment above */
+ tsk->cpuset = &top_cpuset; /* the_top_cpuset_hack - see above */
if (notify_on_release(cs)) {
char *pathbuf = NULL;
* So only GFP_KERNEL allocations, if all nodes in the cpuset are
* short of memory, might require taking the callback_mutex mutex.
*
- * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages()
- * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing
- * hardwall cpusets - no allocation on a node outside the cpuset is
- * allowed (unless in interrupt, of course).
+ * The first call here from mm/page_alloc.c:get_page_from_freelist()
+ * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, so
+ * no allocation on a node outside the cpuset is allowed (unless in
+ * interrupt, of course).
*
- * The second loop doesn't even call here for GFP_ATOMIC requests
- * (if the __alloc_pages() local variable 'wait' is set). That check
- * and the checks below have the combined affect in the second loop of
- * the __alloc_pages() routine that:
+ * The second pass through get_page_from_freelist() doesn't even call
+ * here for GFP_ATOMIC allocations. For those, the __alloc_pages()
+ * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
+ * in alloc_flags. That logic and the checks below have the combined
+ * effect that:
* in_interrupt - any node ok (current task context irrelevant)
* GFP_ATOMIC - any node ok
* GFP_KERNEL - any node in enclosing mem_exclusive cpuset ok
* GFP_USER - only nodes in current tasks mems allowed ok.
+ *
+ * Rule:
+ * Don't call cpuset_zone_allowed() if you can't sleep, unless you
+ * pass in the __GFP_HARDWALL flag set in gfp_mask, which disables
+ * the code that might scan up ancestor cpusets and sleep.
**/
int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
int node; /* node that zone z is on */
const struct cpuset *cs; /* current cpuset ancestors */
- int allowed = 1; /* is allocation in zone z allowed? */
+ int allowed; /* is allocation in zone z allowed? */
if (in_interrupt())
return 1;
node = z->zone_pgdat->node_id;
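+	/* Catch callers that can't sleep but didn't pass __GFP_HARDWALL */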
+ might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
if (node_isset(node, current->mems_allowed))
return 1;
if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
* - No need to task_lock(tsk) on this tsk->cpuset reference, as it
* doesn't really matter if tsk->cpuset changes after we read it,
* and we take manage_mutex, keeping attach_task() from changing it
- * anyway.
+ * anyway. No need to check that tsk->cpuset != NULL, thanks to
+ * the_top_cpuset_hack in cpuset_exit(), which sets an exiting task's
+ * cpuset to top_cpuset.
*/
-
static int proc_cpuset_show(struct seq_file *m, void *v)
{
- struct cpuset *cs;
struct task_struct *tsk;
char *buf;
int retval = 0;
tsk = m->private;
mutex_lock(&manage_mutex);
- cs = tsk->cpuset;
- if (!cs) {
- retval = -EINVAL;
- goto out;
- }
-
- retval = cpuset_path(cs, buf, PAGE_SIZE);
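+	/* the_top_cpuset_hack keeps tsk->cpuset non-NULL (see comment above) */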
+ retval = cpuset_path(tsk->cpuset, buf, PAGE_SIZE);
if (retval < 0)
goto out;
seq_puts(m, buf);