static ssize_t node_read_cpumap(struct sys_device * dev, char * buf)
{
struct node *node_dev = to_node(dev);
- cpumask_t mask = node_to_cpumask(node_dev->sysdev.id);
+ node_to_cpumask_ptr(mask, node_dev->sysdev.id);
int len;
/* 2004/06/03: buf currently PAGE_SIZE, need > 1 char per 4 bits. */
BUILD_BUG_ON(MAX_NUMNODES/4 > PAGE_SIZE/2);
- len = cpumask_scnprintf(buf, PAGE_SIZE-1, mask);
- len += sprintf(buf + len, "\n");
+ len = cpumask_scnprintf(buf, PAGE_SIZE-2, *mask);
+ buf[len++] = '\n';
+ buf[len] = '\0';
return len;
}
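For context: node_to_cpumask_ptr() replaces the on-stack cpumask_t copy with a pointer to the node's cpumask. Roughly (a sketch, not the exact header text), the generic fallback keeps one hidden local and exposes a pointer to it, while map-based architectures can instead point straight into their per-node cpumask array and avoid the copy entirely:

/* Sketch of the generic fallback, for illustration only. */
#define node_to_cpumask_ptr(v, node)				\
		cpumask_t _##v = node_to_cpumask(node);		\
		const cpumask_t *v = &_##v

#define node_to_cpumask_ptr_next(v, node)			\
		_##v = node_to_cpumask(node)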
*
* Should use nodemask_t.
*/
-static int find_next_best_node(int node, unsigned long *used_nodes)
+static int find_next_best_node(int node, nodemask_t *used_nodes)
{
int i, n, val, min_val, best_node = 0;
continue;
/* Skip already used nodes */
- if (test_bit(n, used_nodes))
+ if (node_isset(n, *used_nodes))
continue;
/* Simple min distance search */
}
}
- set_bit(best_node, used_nodes);
+ node_set(best_node, *used_nodes);
return best_node;
}
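The switch from test_bit()/set_bit() on a raw bitmap to node_isset()/node_set() on a nodemask_t changes only the type; the helpers are thin wrappers over the same bit operations. A simplified sketch of the relationship (condensed from the nodemask definitions, not verbatim):

/* Simplified: nodemask_t wraps a fixed-size bitmap and the helpers
 * operate on its ->bits member. */
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;

#define node_isset(node, mask)	test_bit((node), (mask).bits)
#define node_set(node, dst)	__node_set((node), &(dst))

static inline void __node_set(int node, volatile nodemask_t *dstp)
{
	set_bit(node, dstp->bits);
}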
/**
* sched_domain_node_span - get a cpumask for a node's sched_domain
* @node: node whose cpumask we're constructing
- * @size: number of nodes to include in this span
*
* Given a node, construct a good cpumask for its sched_domain to span. It
* should be one that prevents unnecessary balancing, but also spreads tasks
* out optimally.
*/
static cpumask_t sched_domain_node_span(int node)
{
- DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
- cpumask_t span, nodemask;
+ nodemask_t used_nodes;
+ cpumask_t span;
+ node_to_cpumask_ptr(nodemask, node);
int i;
cpus_clear(span);
- bitmap_zero(used_nodes, MAX_NUMNODES);
+ nodes_clear(used_nodes);
- nodemask = node_to_cpumask(node);
- cpus_or(span, span, nodemask);
- set_bit(node, used_nodes);
+ cpus_or(span, span, *nodemask);
+ node_set(node, used_nodes);
for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
- int next_node = find_next_best_node(node, used_nodes);
+ int next_node = find_next_best_node(node, &used_nodes);
- nodemask = node_to_cpumask(next_node);
- cpus_or(span, span, nodemask);
+ node_to_cpumask_ptr_next(nodemask, next_node);
+ cpus_or(span, span, *nodemask);
}
return span;
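Note that node_to_cpumask_ptr() both declares and initializes the pointer, so it may appear only once per scope; later updates inside the loop use node_to_cpumask_ptr_next(), which merely reassigns it. A minimal sketch of the pattern (span_two_nodes() is a made-up example, not kernel code):

/* Illustration only. */
static cpumask_t span_two_nodes(int a, int b)
{
	cpumask_t span = CPU_MASK_NONE;
	node_to_cpumask_ptr(mask, a);		/* declares and sets 'mask' */

	cpus_or(span, span, *mask);
	node_to_cpumask_ptr_next(mask, b);	/* reassigns 'mask' only */
	cpus_or(span, span, *mask);
	return span;
}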
for (j = 0; j < MAX_NUMNODES; j++) {
cpumask_t tmp, notcovered;
int n = (i + j) % MAX_NUMNODES;
+ node_to_cpumask_ptr(pnodemask, n);
cpus_complement(notcovered, covered);
cpus_and(tmp, notcovered, *cpu_map);
if (cpus_empty(tmp))
break;
- nodemask = node_to_cpumask(n);
- cpus_and(tmp, tmp, nodemask);
+ cpus_and(tmp, tmp, *pnodemask);
if (cpus_empty(tmp))
continue;
int n, val;
int min_val = INT_MAX;
int best_node = -1;
+ node_to_cpumask_ptr(tmp, 0);
/* Use the local node if we haven't already */
if (!node_isset(node, *used_node_mask)) {
}
for_each_node_state(n, N_HIGH_MEMORY) {
- cpumask_t tmp;
/* Don't want a node to appear more than once */
if (node_isset(n, *used_node_mask))
val += (n < node);
/* Give preference to headless and unused nodes */
- tmp = node_to_cpumask(n);
- if (!cpus_empty(tmp))
+ node_to_cpumask_ptr_next(tmp, n);
+ if (!cpus_empty(*tmp))
val += PENALTY_FOR_NODE_WITH_CPUS;
/* Slight preference for less loaded node */
struct kmem_cache *cachep;
struct kmem_list3 *l3 = NULL;
int node = cpu_to_node(cpu);
+ node_to_cpumask_ptr(mask, node);
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
struct array_cache *shared;
struct array_cache **alien;
- cpumask_t mask;
- mask = node_to_cpumask(node);
/* cpu is dead; no one can alloc from it. */
nc = cachep->array[cpu];
cachep->array[cpu] = NULL;
if (nc)
free_block(cachep, nc->entry, nc->avail, node);
- if (!cpus_empty(mask)) {
+ if (!cpus_empty(*mask)) {
spin_unlock_irq(&l3->list_lock);
goto free_array_cache;
}
struct reclaim_state reclaim_state = {
.reclaimed_slab = 0,
};
- cpumask_t cpumask;
+ node_to_cpumask_ptr(cpumask, pgdat->node_id);
- cpumask = node_to_cpumask(pgdat->node_id);
- if (!cpus_empty(cpumask))
- set_cpus_allowed(tsk, cpumask);
+ if (!cpus_empty(*cpumask))
+ set_cpus_allowed_ptr(tsk, cpumask);
current->reclaim_state = &reclaim_state;
/*
static int __devinit cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
- pg_data_t *pgdat;
- cpumask_t mask;
int nid;
if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
for_each_node_state(nid, N_HIGH_MEMORY) {
- pgdat = NODE_DATA(nid);
- mask = node_to_cpumask(pgdat->node_id);
- if (any_online_cpu(mask) != NR_CPUS)
+ pg_data_t *pgdat = NODE_DATA(nid);
+ node_to_cpumask_ptr(mask, pgdat->node_id);
+
+ if (any_online_cpu(*mask) < nr_cpu_ids)
/* One of our CPUs online: restore mask */
- set_cpus_allowed(pgdat->kswapd, mask);
+ set_cpus_allowed_ptr(pgdat->kswapd, mask);
}
}
return NOTIFY_OK;
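The hunk above also shows the companion conversions: set_cpus_allowed() takes a cpumask_t by value, while set_cpus_allowed_ptr() takes a pointer, and the "no CPU found" check moves from comparing against NR_CPUS to testing < nr_cpu_ids. A hedged sketch of the new calling convention (pin_to_node() is a hypothetical helper, not kernel code):

/* Illustration only: bind a task to the CPUs of one node. */
static void pin_to_node(struct task_struct *tsk, int nid)
{
	node_to_cpumask_ptr(mask, nid);

	/* any value >= nr_cpu_ids means no online CPU in the mask */
	if (any_online_cpu(*mask) < nr_cpu_ids)
		set_cpus_allowed_ptr(tsk, mask);	/* pointer, no struct copy */
}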
svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
{
struct svc_pool_map *m = &svc_pool_map;
- unsigned int node; /* or cpu */
/*
* The caller checks for sv_nrpools > 1, which
* implies that we've been initialized.
*/
default:
return 0;
case SVC_POOL_PERCPU:
- node = m->pool_to[pidx];
+ {
+ unsigned int cpu = m->pool_to[pidx];
+
*oldmask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(node));
+ set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
return 1;
+ }
case SVC_POOL_PERNODE:
- node = m->pool_to[pidx];
+ {
+ unsigned int node = m->pool_to[pidx];
+ node_to_cpumask_ptr(nodecpumask, node);
+
*oldmask = current->cpus_allowed;
- set_cpus_allowed(current, node_to_cpumask(node));
+ set_cpus_allowed_ptr(current, nodecpumask);
return 1;
}
+ }
}
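Callers of svc_pool_map_set_cpumask() are expected to save the previous affinity through *oldmask and restore it once the per-pool work is done. A sketch of that save/restore pattern (run_pinned() is illustrative, not the actual sunrpc caller):

/* Illustration only. */
static void run_pinned(unsigned int pidx)
{
	cpumask_t oldmask;
	int pinned = svc_pool_map_set_cpumask(pidx, &oldmask);

	/* ... work that should run on the pool's CPUs ... */

	if (pinned)
		set_cpus_allowed_ptr(current, &oldmask);
}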
/*