err.no Git - linux-2.6/commitdiff
Convert cpu_sibling_map to be a per cpu variable
authorMike Travis <travis@sgi.com>
Tue, 16 Oct 2007 08:24:05 +0000 (01:24 -0700)
committerLinus Torvalds <torvalds@woody.linux-foundation.org>
Tue, 16 Oct 2007 16:42:50 +0000 (09:42 -0700)
Convert cpu_sibling_map from a static array sized by NR_CPUS to a per_cpu
variable.  This saves sizeof(cpumask_t) for each unused cpu slot (NR_CPUS minus
the actual cpu count).  Access is mostly
from startup and CPU HOTPLUG functions.

Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
26 files changed:
arch/ia64/kernel/setup.c
arch/ia64/kernel/smpboot.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/platforms/cell/cbe_cpufreq.c
arch/sparc64/kernel/smp.c
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
arch/x86/kernel/io_apic_32.c
arch/x86/kernel/smpboot_32.c
arch/x86/kernel/smpboot_64.c
arch/x86/oprofile/op_model_p4.c
arch/x86/xen/smp.c
block/blktrace.c
include/asm-ia64/smp.h
include/asm-ia64/topology.h
include/asm-powerpc/smp.h
include/asm-powerpc/topology.h
include/asm-sparc64/smp.h
include/asm-sparc64/topology.h
include/asm-x86/smp_32.h
include/asm-x86/smp_64.h
include/asm-x86/topology_32.h
include/asm-x86/topology_64.h
kernel/sched.c

index 9e392a30d19783d3426ca720b95fbea1221a71ff..777c8d8bd5e70febf80c8e5a8ed0c5d94d85de40 100644 (file)
@@ -528,10 +528,6 @@ setup_arch (char **cmdline_p)
 
 #ifdef CONFIG_SMP
        cpu_physical_id(0) = hard_smp_processor_id();
-
-       cpu_set(0, cpu_sibling_map[0]);
-       cpu_set(0, cpu_core_map[0]);
-
        check_for_logical_procs();
        if (smp_num_cpucores > 1)
                printk(KERN_INFO
@@ -873,6 +869,14 @@ cpu_init (void)
        void *cpu_data;
 
        cpu_data = per_cpu_init();
+       /*
+        * insert boot cpu into sibling and core maps
+        * (must be done after per_cpu area is setup)
+        */
+       if (smp_processor_id() == 0) {
+               cpu_set(0, per_cpu(cpu_sibling_map, 0));
+               cpu_set(0, cpu_core_map[0]);
+       }
 
        /*
         * We set ar.k3 so that assembly code in MCA handler can compute
index 308772f7cddc23f9a733c1054f63dd3cf382d7aa..c57dbce25c12f2b7cbdc05557d2d3c8a7cd4b8bc 100644 (file)
@@ -138,7 +138,9 @@ cpumask_t cpu_possible_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_possible_map);
 
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
+
 int smp_num_siblings = 1;
 int smp_num_cpucores = 1;
 
@@ -650,12 +652,12 @@ clear_cpu_sibling_map(int cpu)
 {
        int i;
 
-       for_each_cpu_mask(i, cpu_sibling_map[cpu])
-               cpu_clear(cpu, cpu_sibling_map[i]);
+       for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
+               cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
        for_each_cpu_mask(i, cpu_core_map[cpu])
                cpu_clear(cpu, cpu_core_map[i]);
 
-       cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE;
+       per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
 }
 
 static void
@@ -666,7 +668,7 @@ remove_siblinginfo(int cpu)
        if (cpu_data(cpu)->threads_per_core == 1 &&
            cpu_data(cpu)->cores_per_socket == 1) {
                cpu_clear(cpu, cpu_core_map[cpu]);
-               cpu_clear(cpu, cpu_sibling_map[cpu]);
+               cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
                return;
        }
 
@@ -807,8 +809,8 @@ set_cpu_sibling_map(int cpu)
                        cpu_set(i, cpu_core_map[cpu]);
                        cpu_set(cpu, cpu_core_map[i]);
                        if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
-                               cpu_set(i, cpu_sibling_map[cpu]);
-                               cpu_set(cpu, cpu_sibling_map[i]);
+                               cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+                               cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                        }
                }
        }
@@ -839,7 +841,7 @@ __cpu_up (unsigned int cpu)
 
        if (cpu_data(cpu)->threads_per_core == 1 &&
            cpu_data(cpu)->cores_per_socket == 1) {
-               cpu_set(cpu, cpu_sibling_map[cpu]);
+               cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
                cpu_set(cpu, cpu_core_map[cpu]);
                return 0;
        }
index 36c90ba2d31269a2fa32d950c1ec812c6e94264b..2de00f870edc34438352ae71ed54c7013228b7fa 100644 (file)
@@ -413,16 +413,28 @@ void __init smp_setup_cpu_maps(void)
                of_node_put(dn);
        }
 
+       vdso_data->processorCount = num_present_cpus();
+#endif /* CONFIG_PPC64 */
+}
+
+/*
+ * Being that cpu_sibling_map is now a per_cpu array, then it cannot
+ * be initialized until the per_cpu areas have been created.  This
+ * function is now called from setup_per_cpu_areas().
+ */
+void __init smp_setup_cpu_sibling_map(void)
+{
+#if defined(CONFIG_PPC64)
+       int cpu;
+
        /*
         * Do the sibling map; assume only two threads per processor.
         */
        for_each_possible_cpu(cpu) {
-               cpu_set(cpu, cpu_sibling_map[cpu]);
+               cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
                if (cpu_has_feature(CPU_FTR_SMT))
-                       cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
+                       cpu_set(cpu ^ 0x1, per_cpu(cpu_sibling_map, cpu));
        }
-
-       vdso_data->processorCount = num_present_cpus();
 #endif /* CONFIG_PPC64 */
 }
 #endif /* CONFIG_SMP */
index 008ab6823b022a1793259b62697008f15feb90d5..0e014550b83f714912b546c03169ade684e7d274 100644 (file)
@@ -597,6 +597,9 @@ void __init setup_per_cpu_areas(void)
                paca[i].data_offset = ptr - __per_cpu_start;
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
        }
+
+       /* Now that per_cpu is setup, initialize cpu_sibling_map */
+       smp_setup_cpu_sibling_map();
 }
 #endif
 
index d30f08fa029772dff196d73b6177924e27aeebc4..338950aeb6f6e3f3d19511e87cc992ad4e3535e4 100644 (file)
@@ -61,11 +61,11 @@ struct thread_info *secondary_ti;
 
 cpumask_t cpu_possible_map = CPU_MASK_NONE;
 cpumask_t cpu_online_map = CPU_MASK_NONE;
-cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
-EXPORT_SYMBOL(cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* SMP operations for this machine */
 struct smp_ops_t *smp_ops;
index 5123e9d4164b7c1c6c65898ed20dfb0b23f0cddd..13d5a87f13b17396acdf52800fd0c8c1b30a8984 100644 (file)
@@ -117,7 +117,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
        policy->cur = cbe_freqs[cur_pmode].frequency;
 
 #ifdef CONFIG_SMP
-       policy->cpus = cpu_sibling_map[policy->cpu];
+       policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
 #endif
 
        cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
index c73b7a48b0369ae970edf7d103672621eacb2e21..407d74a8a5421718114232a2cc55a769d3447b78 100644 (file)
@@ -52,14 +52,13 @@ int sparc64_multi_core __read_mostly;
 
 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
-       { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
 EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
 
 static cpumask_t smp_commenced_mask;
@@ -1261,16 +1260,16 @@ void __devinit smp_fill_in_sib_core_maps(void)
        for_each_present_cpu(i) {
                unsigned int j;
 
-               cpus_clear(cpu_sibling_map[i]);
+               cpus_clear(per_cpu(cpu_sibling_map, i));
                if (cpu_data(i).proc_id == -1) {
-                       cpu_set(i, cpu_sibling_map[i]);
+                       cpu_set(i, per_cpu(cpu_sibling_map, i));
                        continue;
                }
 
                for_each_present_cpu(j) {
                        if (cpu_data(i).proc_id ==
                            cpu_data(j).proc_id)
-                               cpu_set(j, cpu_sibling_map[i]);
+                               cpu_set(j, per_cpu(cpu_sibling_map, i));
                }
        }
 }
@@ -1342,9 +1341,9 @@ int __cpu_disable(void)
                cpu_clear(cpu, cpu_core_map[i]);
        cpus_clear(cpu_core_map[cpu]);
 
-       for_each_cpu_mask(i, cpu_sibling_map[cpu])
-               cpu_clear(cpu, cpu_sibling_map[i]);
-       cpus_clear(cpu_sibling_map[cpu]);
+       for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
+               cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
+       cpus_clear(per_cpu(cpu_sibling_map, cpu));
 
        c = &cpu_data(cpu);
 
index 8eb414b906d29f7e20f93fdc5d0e6e0edfe21da1..793eae854f4f779e26ba95404a054c94349cc395 100644 (file)
@@ -200,7 +200,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
        unsigned int i;
 
 #ifdef CONFIG_SMP
-       policy->cpus = cpu_sibling_map[policy->cpu];
+       policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
 #endif
 
        /* Errata workaround */
index 36685e8f7be1171cf260d75646be0dff7f00a472..14d68aa301eea923ebff083e7cfbc5ad42a1083d 100644 (file)
@@ -322,7 +322,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 
        /* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-       policy->cpus = cpu_sibling_map[policy->cpu];
+       policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
 #endif
 
        cpus_allowed = current->cpus_allowed;
index e2f4a1c685476c0025e88007c674e939ae19574d..4ee1e5ee9b5732f3bd0dca4f771df97287ba86bd 100644 (file)
@@ -378,7 +378,7 @@ static struct irq_cpu_info {
 
 #define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask)
 
-#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
+#define CPU_TO_PACKAGEINDEX(i) (first_cpu(per_cpu(cpu_sibling_map, i)))
 
 static cpumask_t balance_irq_affinity[NR_IRQS] = {
        [0 ... NR_IRQS-1] = CPU_MASK_ALL
@@ -598,7 +598,7 @@ tryanotherirq:
         * (A+B)/2 vs B
         */
        load = CPU_IRQ(min_loaded) >> 1;
-       for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
+       for_each_cpu_mask(j, per_cpu(cpu_sibling_map, min_loaded)) {
                if (load > CPU_IRQ(j)) {
                        /* This won't change cpu_sibling_map[min_loaded] */
                        load = CPU_IRQ(j);
index 4cbab48ba86548ebfeb788b5d096ad2e0a596508..31fc08bd15ef2d1a2c4fc950d09de6ef9a83a2ab 100644 (file)
@@ -70,8 +70,8 @@ EXPORT_SYMBOL(smp_num_siblings);
 int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
 
 /* representing HT siblings of each logical CPU */
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_core_map);
@@ -319,8 +319,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                for_each_cpu_mask(i, cpu_sibling_setup_map) {
                        if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
                            c[cpu].cpu_core_id == c[i].cpu_core_id) {
-                               cpu_set(i, cpu_sibling_map[cpu]);
-                               cpu_set(cpu, cpu_sibling_map[i]);
+                               cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+                               cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                                cpu_set(i, per_cpu(cpu_core_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_core_map, i));
                                cpu_set(i, c[cpu].llc_shared_map);
@@ -328,13 +328,13 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                        }
                }
        } else {
-               cpu_set(cpu, cpu_sibling_map[cpu]);
+               cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
        }
 
        cpu_set(cpu, c[cpu].llc_shared_map);
 
        if (current_cpu_data.x86_max_cores == 1) {
-               per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
+               per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
                c[cpu].booted_cores = 1;
                return;
        }
@@ -351,12 +351,12 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                        /*
                         *  Does this new cpu bringup a new core?
                         */
-                       if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+                       if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
                                 */
-                               if (first_cpu(cpu_sibling_map[i]) == i)
+                               if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
                                        c[cpu].booted_cores++;
                                /*
                                 * increment the core count for all
@@ -983,7 +983,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                        printk(KERN_NOTICE "Local APIC not detected."
                                           " Using dummy APIC emulation.\n");
                map_cpu_to_logical_apicid();
-               cpu_set(0, cpu_sibling_map[0]);
+               cpu_set(0, per_cpu(cpu_sibling_map, 0));
                cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }
@@ -1008,7 +1008,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
-               cpu_set(0, cpu_sibling_map[0]);
+               cpu_set(0, per_cpu(cpu_sibling_map, 0));
                cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }
@@ -1023,7 +1023,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
-               cpu_set(0, cpu_sibling_map[0]);
+               cpu_set(0, per_cpu(cpu_sibling_map, 0));
                cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }
@@ -1102,15 +1102,15 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
        Dprintk("Boot done.\n");
 
        /*
-        * construct cpu_sibling_map[], so that we can tell sibling CPUs
+        * construct cpu_sibling_map, so that we can tell sibling CPUs
         * efficiently.
         */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               cpus_clear(cpu_sibling_map[cpu]);
+               cpus_clear(per_cpu(cpu_sibling_map, cpu));
                cpus_clear(per_cpu(cpu_core_map, cpu));
        }
 
-       cpu_set(0, cpu_sibling_map[0]);
+       cpu_set(0, per_cpu(cpu_sibling_map, 0));
        cpu_set(0, per_cpu(cpu_core_map, 0));
 
        smpboot_setup_io_apic();
@@ -1153,13 +1153,13 @@ void remove_siblinginfo(int cpu)
                /*/
                 * last thread sibling in this cpu core going down
                 */
-               if (cpus_weight(cpu_sibling_map[cpu]) == 1)
+               if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
                        c[sibling].booted_cores--;
        }
                        
-       for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
-               cpu_clear(cpu, cpu_sibling_map[sibling]);
-       cpus_clear(cpu_sibling_map[cpu]);
+       for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+               cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+       cpus_clear(per_cpu(cpu_sibling_map, cpu));
        cpus_clear(per_cpu(cpu_core_map, cpu));
        c[cpu].phys_proc_id = 0;
        c[cpu].cpu_core_id = 0;
index 6723c8622828829c3e18ef447b1ab9cb04d2d0ab..0faa0a0af27246a655bfa07ef8ae5389b1af09e1 100644 (file)
@@ -91,8 +91,8 @@ EXPORT_SYMBOL(cpu_data);
 int smp_threads_ready;
 
 /* representing HT siblings of each logical CPU */
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_core_map);
@@ -262,8 +262,8 @@ static inline void set_cpu_sibling_map(int cpu)
                for_each_cpu_mask(i, cpu_sibling_setup_map) {
                        if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
                            c[cpu].cpu_core_id == c[i].cpu_core_id) {
-                               cpu_set(i, cpu_sibling_map[cpu]);
-                               cpu_set(cpu, cpu_sibling_map[i]);
+                               cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+                               cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                                cpu_set(i, per_cpu(cpu_core_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_core_map, i));
                                cpu_set(i, c[cpu].llc_shared_map);
@@ -271,13 +271,13 @@ static inline void set_cpu_sibling_map(int cpu)
                        }
                }
        } else {
-               cpu_set(cpu, cpu_sibling_map[cpu]);
+               cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
        }
 
        cpu_set(cpu, c[cpu].llc_shared_map);
 
        if (current_cpu_data.x86_max_cores == 1) {
-               per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
+               per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
                c[cpu].booted_cores = 1;
                return;
        }
@@ -294,12 +294,12 @@ static inline void set_cpu_sibling_map(int cpu)
                        /*
                         *  Does this new cpu bringup a new core?
                         */
-                       if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+                       if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
                                 */
-                               if (first_cpu(cpu_sibling_map[i]) == i)
+                               if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
                                        c[cpu].booted_cores++;
                                /*
                                 * increment the core count for all
@@ -735,7 +735,7 @@ static __init void disable_smp(void)
                phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
        else
                phys_cpu_present_map = physid_mask_of_physid(0);
-       cpu_set(0, cpu_sibling_map[0]);
+       cpu_set(0, per_cpu(cpu_sibling_map, 0));
        cpu_set(0, per_cpu(cpu_core_map, 0));
 }
 
@@ -976,13 +976,13 @@ static void remove_siblinginfo(int cpu)
                /*
                 * last thread sibling in this cpu core going down
                 */
-               if (cpus_weight(cpu_sibling_map[cpu]) == 1)
+               if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
                        c[sibling].booted_cores--;
        }
                        
-       for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
-               cpu_clear(cpu, cpu_sibling_map[sibling]);
-       cpus_clear(cpu_sibling_map[cpu]);
+       for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+               cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+       cpus_clear(per_cpu(cpu_sibling_map, cpu));
        cpus_clear(per_cpu(cpu_core_map, cpu));
        c[cpu].phys_proc_id = 0;
        c[cpu].cpu_core_id = 0;
index 47925927b12f7cd7a7d293a021dc063d7462367c..56b4757a1f4706cef329fac43c38e71fcd491bd6 100644 (file)
@@ -379,7 +379,7 @@ static unsigned int get_stagger(void)
 {
 #ifdef CONFIG_SMP
        int cpu = smp_processor_id();
-       return (cpu != first_cpu(cpu_sibling_map[cpu]));
+       return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
 #endif 
        return 0;
 }
index 539d42530fc49ebcdc9170d2abc99ea433e647f8..4fa33c27ccb6d4d4e6a7619f7bd8c46c25f36a2c 100644 (file)
@@ -147,7 +147,7 @@ void __init xen_smp_prepare_boot_cpu(void)
        make_lowmem_page_readwrite(&per_cpu__gdt_page);
 
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               cpus_clear(cpu_sibling_map[cpu]);
+               cpus_clear(per_cpu(cpu_sibling_map, cpu));
                /*
                 * cpu_core_map lives in a per cpu area that is cleared
                 * when the per cpu array is allocated.
@@ -164,7 +164,7 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
        unsigned cpu;
 
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               cpus_clear(cpu_sibling_map[cpu]);
+               cpus_clear(per_cpu(cpu_sibling_map, cpu));
                /*
                 * cpu_core_ map will be zeroed when the per
                 * cpu area is allocated.
index 775471ef84a536159da907711070e889659984e2..d00ac3993c188964278bbba6bb6b3bc7827b3544 100644 (file)
@@ -550,7 +550,7 @@ static void blk_trace_set_ht_offsets(void)
        for_each_online_cpu(cpu) {
                unsigned long long *cpu_off, *sibling_off;
 
-               for_each_cpu_mask(i, cpu_sibling_map[cpu]) {
+               for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) {
                        if (i == cpu)
                                continue;
 
index 6314b29e8c4d6e32f249f3fb9376bc02e86ec729..1703c9d885bd0723b186f2ca8a673741b1d666cf 100644 (file)
@@ -58,7 +58,7 @@ extern char no_int_routing __devinitdata;
 
 extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_core_map[NR_CPUS];
-extern cpumask_t cpu_sibling_map[NR_CPUS];
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern int smp_num_siblings;
 extern int smp_num_cpucores;
 extern void __iomem *ipi_base_addr;
index 233f1caae048b2b5454c255248bd664d600eefc9..2d67b72b18d07049d5ba0f46fea96369605a39ba 100644 (file)
@@ -112,7 +112,7 @@ void build_cpu_to_node_map(void);
 #define topology_physical_package_id(cpu)      (cpu_data(cpu)->socket_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu)->core_id)
 #define topology_core_siblings(cpu)            (cpu_core_map[cpu])
-#define topology_thread_siblings(cpu)          (cpu_sibling_map[cpu])
+#define topology_thread_siblings(cpu)          (per_cpu(cpu_sibling_map, cpu))
 #define smt_capable()                          (smp_num_siblings > 1)
 #endif
 
index 19102bfc14cab5207b97d323e7b4bb6137b004fe..505f35bacaa913a1d339406f0be1f1599389572f 100644 (file)
@@ -26,6 +26,7 @@
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
 #endif
+#include <asm/percpu.h>
 
 extern int boot_cpuid;
 
@@ -58,7 +59,7 @@ extern int smp_hw_index[];
                                        (smp_hw_index[(cpu)] = (phys))
 #endif
 
-extern cpumask_t cpu_sibling_map[NR_CPUS];
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 
 /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
  *
@@ -77,6 +78,7 @@ void smp_init_pSeries(void);
 void smp_init_cell(void);
 void smp_init_celleb(void);
 void smp_setup_cpu_maps(void);
+void smp_setup_cpu_sibling_map(void);
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
index 0ad21a849b5fb5dad0b4e50a88df61b8a4bf1a13..ca23b681ad058e7f984d9bffff2f1b71af52382e 100644 (file)
@@ -108,7 +108,7 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
 #ifdef CONFIG_PPC64
 #include <asm/smp.h>
 
-#define topology_thread_siblings(cpu)  (cpu_sibling_map[cpu])
+#define topology_thread_siblings(cpu)  (per_cpu(cpu_sibling_map, cpu))
 #endif
 #endif
 
index e8a96a31761bad24b17d33911f1e942e26a1b415..42c09949526cd53f3b259b4912463a77851edbee 100644 (file)
@@ -28,8 +28,9 @@
  
 #include <asm/bitops.h>
 #include <asm/atomic.h>
+#include <asm/percpu.h>
 
-extern cpumask_t cpu_sibling_map[NR_CPUS];
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern cpumask_t cpu_core_map[NR_CPUS];
 extern int sparc64_multi_core;
 
index 290ac75f385bee443a24eb9bba11fb70783edc46..c6b557034f686623ab59a6a109b92302b5e4a3d2 100644 (file)
@@ -5,7 +5,7 @@
 #define topology_physical_package_id(cpu)      (cpu_data(cpu).proc_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu).core_id)
 #define topology_core_siblings(cpu)            (cpu_core_map[cpu])
-#define topology_thread_siblings(cpu)          (cpu_sibling_map[cpu])
+#define topology_thread_siblings(cpu)          (per_cpu(cpu_sibling_map, cpu))
 #define mc_capable()                           (sparc64_multi_core)
 #define smt_capable()                          (sparc64_multi_core)
 #endif /* CONFIG_SMP */
index 01ab31bb262a57ce9518b4063a23dc0a5da78286..955dd7c8538f1a1dc2a75436d780bc33dba923fc 100644 (file)
@@ -30,7 +30,7 @@
 extern void smp_alloc_memory(void);
 extern int pic_mode;
 extern int smp_num_siblings;
-extern cpumask_t cpu_sibling_map[];
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 
 extern void (*mtrr_hook) (void);
index 65f8448644151ed13b2eac23df5b5988400b0cdf..f5bcee1c0927e67d9194e25fc5a02c07fea671da 100644 (file)
@@ -38,12 +38,14 @@ extern void unlock_ipi_call_lock(void);
 extern int smp_num_siblings;
 extern void smp_send_reschedule(int cpu);
 
-extern cpumask_t cpu_sibling_map[NR_CPUS];
 /*
- * cpu_core_map lives in a per cpu area
+ * cpu_sibling_map and cpu_core_map now live
+ * in the per cpu area
  *
+ * extern cpumask_t cpu_sibling_map[NR_CPUS];
  * extern cpumask_t cpu_core_map[NR_CPUS];
  */
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 extern u8 cpu_llc_id[NR_CPUS];
 
index 7b68dbcd0eb015fa13683821888ada8d85c74123..ae1074603c4b1e4508059fc40d08ef83d1ecb99a 100644 (file)
@@ -31,7 +31,7 @@
 #define topology_physical_package_id(cpu)      (cpu_data[cpu].phys_proc_id)
 #define topology_core_id(cpu)                  (cpu_data[cpu].cpu_core_id)
 #define topology_core_siblings(cpu)            (per_cpu(cpu_core_map, cpu))
-#define topology_thread_siblings(cpu)          (cpu_sibling_map[cpu])
+#define topology_thread_siblings(cpu)          (per_cpu(cpu_sibling_map, cpu))
 #endif
 
 #ifdef CONFIG_NUMA
index b8590dff34c8d8957d0451dd604bb4910d794a8b..848c17f922262d0e7fd9733c0baa1b7a58e347a0 100644 (file)
@@ -59,7 +59,7 @@ extern int __node_distance(int, int);
 #define topology_physical_package_id(cpu)      (cpu_data[cpu].phys_proc_id)
 #define topology_core_id(cpu)                  (cpu_data[cpu].cpu_core_id)
 #define topology_core_siblings(cpu)            (per_cpu(cpu_core_map, cpu))
-#define topology_thread_siblings(cpu)          (cpu_sibling_map[cpu])
+#define topology_thread_siblings(cpu)          (per_cpu(cpu_sibling_map, cpu))
 #define mc_capable()                   (boot_cpu_data.x86_max_cores > 1)
 #define smt_capable()                  (smp_num_siblings > 1)
 #endif
index bba57adb95044ba215caa58ee7bdd514ccd71c1c..78c8fbd373a39046304c908d4d35d29ad130c25f 100644 (file)
@@ -5869,7 +5869,7 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
                             struct sched_group **sg)
 {
        int group;
-       cpumask_t mask = cpu_sibling_map[cpu];
+       cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
        cpus_and(mask, mask, *cpu_map);
        group = first_cpu(mask);
        if (sg)
@@ -5898,7 +5898,7 @@ static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
        cpus_and(mask, mask, *cpu_map);
        group = first_cpu(mask);
 #elif defined(CONFIG_SCHED_SMT)
-       cpumask_t mask = cpu_sibling_map[cpu];
+       cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
        cpus_and(mask, mask, *cpu_map);
        group = first_cpu(mask);
 #else
@@ -6132,7 +6132,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
                p = sd;
                sd = &per_cpu(cpu_domains, i);
                *sd = SD_SIBLING_INIT;
-               sd->span = cpu_sibling_map[i];
+               sd->span = per_cpu(cpu_sibling_map, i);
                cpus_and(sd->span, sd->span, *cpu_map);
                sd->parent = p;
                p->child = sd;
@@ -6143,7 +6143,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 #ifdef CONFIG_SCHED_SMT
        /* Set up CPU (sibling) groups */
        for_each_cpu_mask(i, *cpu_map) {
-               cpumask_t this_sibling_map = cpu_sibling_map[i];
+               cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i);
                cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
                if (i != first_cpu(this_sibling_map))
                        continue;