[PATCH] for_each_possible_cpu: sparc64
author KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tue, 11 Apr 2006 05:52:52 +0000 (22:52 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Tue, 11 Apr 2006 13:18:31 +0000 (06:18 -0700)
for_each_cpu() actually iterates across all possible CPUs.  We've had mistakes
in the past where people were using for_each_cpu() where they should have been
iterating across only online or present CPUs.  This is inefficient and
possibly buggy.

We're renaming for_each_cpu() to for_each_possible_cpu() to avoid this in the
future.

This patch replaces for_each_cpu with for_each_possible_cpu for sparc64.
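
For context, a minimal sketch (not part of this patch) of how the possible and online masks differ in typical use; some_counter and init_counters are hypothetical names introduced only for illustration:

	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/kernel.h>

	/*
	 * Sketch only: per-CPU storage exists for every possible CPU, so
	 * initialization walks the possible mask, while work aimed at CPUs
	 * that are actually running walks the online mask.
	 */
	DEFINE_PER_CPU(unsigned long, some_counter);

	static void init_counters(void)
	{
		int cpu;

		for_each_possible_cpu(cpu)	/* every CPU that may ever exist */
			per_cpu(some_counter, cpu) = 0;

		for_each_online_cpu(cpu)	/* only CPUs currently up */
			printk(KERN_INFO "cpu %d: counter reset\n", cpu);
	}
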

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
arch/sparc64/kernel/pci_sun4v.c
arch/sparc64/kernel/setup.c
arch/sparc64/kernel/smp.c
include/asm-sparc64/percpu.h

arch/sparc64/kernel/pci_sun4v.c
index 9372d4f376d5af9aad36d094194abd9da5e2d28a..9e94db2573a2f83f8c1d58072d8e2d28cd0035d4 100644
@@ -1092,7 +1092,7 @@ void sun4v_pci_init(int node, char *model_name)
                }
        }
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                unsigned long page = get_zeroed_page(GFP_ATOMIC);
 
                if (!page)
arch/sparc64/kernel/setup.c
index 7d0e67c1ce5099102dce96928a2b3ae3b119ef57..005167f82419b475cb7ae1e4ef109610ba61a5dd 100644
@@ -535,7 +535,7 @@ static int __init topology_init(void)
        while (!cpu_find_by_instance(ncpus_probed, NULL, NULL))
                ncpus_probed++;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
                if (p) {
                        register_cpu(p, i, NULL);
arch/sparc64/kernel/smp.c
index eb36f7988ff790d083782e0d33d4b5a536156d99..90eaca3ec9a628c40ba90f27c2a89e825afdf736 100644
@@ -1280,7 +1280,7 @@ int setup_profiling_timer(unsigned int multiplier)
                return -EINVAL;
 
        spin_lock_irqsave(&prof_setup_lock, flags);
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                prof_multiplier(i) = multiplier;
        current_tick_offset = (timer_tick_offset / multiplier);
        spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -1308,12 +1308,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                }
        }
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                if (tlb_type == hypervisor) {
                        int j;
 
                        /* XXX get this mapping from machine description */
-                       for_each_cpu(j) {
+                       for_each_possible_cpu(j) {
                                if ((j >> 2) == (i >> 2))
                                        cpu_set(j, cpu_sibling_map[i]);
                        }
include/asm-sparc64/percpu.h
index 82032e159a76376040b8ac9084e788f75eeb4e1f..baef13b589525f7b7b9e21fe4dde913def2e40e4 100644
@@ -26,7 +26,7 @@ register unsigned long __local_per_cpu_offset asm("g5");
 #define percpu_modcopy(pcpudst, src, size)                     \
 do {                                                           \
        unsigned int __i;                                       \
-       for_each_cpu(__i)                                       \
+       for_each_possible_cpu(__i)                              \
                memcpy((pcpudst)+__per_cpu_offset(__i),         \
                       (src), (size));                          \
 } while (0)