err.no Git - linux-2.6/commitdiff
x86: move sibling functions to common file
author Glauber Costa <gcosta@redhat.com>
Mon, 3 Mar 2008 17:13:02 +0000 (14:13 -0300)
committer Ingo Molnar <mingo@elte.hu>
Thu, 17 Apr 2008 15:40:56 +0000 (17:40 +0200)
set_cpu_sibling_map() and remove_siblinginfo() are identical
in the 32-bit and 64-bit versions, so move them to the common
file arch/x86/kernel/smpboot.c.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/smpboot.c
arch/x86/kernel/smpboot_32.c
arch/x86/kernel/smpboot_64.c
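
The function being consolidated does symmetric bookkeeping: each time a CPU
comes online, it is added to the sibling/core masks of every already set-up
CPU that shares its core or package ids, and those CPUs are added to its
masks in return. The user-space sketch below models only that pattern with
plain bitmasks; the struct, field and mask names are illustrative
assumptions, not the kernel's per-CPU cpumask_t machinery.

/*
 * Toy model of the bookkeeping done by set_cpu_sibling_map().
 * All names here are illustrative, not kernel identifiers.
 */
#include <stdio.h>

#define NR_TOY_CPUS 8

struct toy_cpu {
	int phys_proc_id;		/* physical package id */
	int cpu_core_id;		/* core id within the package */
	unsigned long sibling_mask;	/* threads in the same core */
	unsigned long core_mask;	/* cpus in the same package */
};

static struct toy_cpu cpus[NR_TOY_CPUS];
static unsigned long setup_mask;	/* cpus brought up so far */

/*
 * Mirror of the symmetric updates in set_cpu_sibling_map(): whenever the
 * new cpu is added to an earlier cpu's mask, that earlier cpu is also
 * added to the new cpu's mask, so the maps stay consistent as cpus
 * come online one by one.
 */
static void toy_set_sibling_map(int cpu)
{
	int i;

	setup_mask |= 1UL << cpu;

	for (i = 0; i < NR_TOY_CPUS; i++) {
		if (!(setup_mask & (1UL << i)))
			continue;
		if (cpus[cpu].phys_proc_id == cpus[i].phys_proc_id) {
			cpus[cpu].core_mask |= 1UL << i;
			cpus[i].core_mask |= 1UL << cpu;
			if (cpus[cpu].cpu_core_id == cpus[i].cpu_core_id) {
				cpus[cpu].sibling_mask |= 1UL << i;
				cpus[i].sibling_mask |= 1UL << cpu;
			}
		}
	}
}

int main(void)
{
	int cpu;

	/* Two packages, two cores each, two threads per core. */
	for (cpu = 0; cpu < NR_TOY_CPUS; cpu++) {
		cpus[cpu].phys_proc_id = cpu / 4;
		cpus[cpu].cpu_core_id = (cpu / 2) % 2;
		toy_set_sibling_map(cpu);
	}

	for (cpu = 0; cpu < NR_TOY_CPUS; cpu++)
		printf("cpu%d: siblings=%02lx cores=%02lx\n",
		       cpu, cpus[cpu].sibling_mask, cpus[cpu].core_mask);
	return 0;
}

Running it prints siblings=03 cores=0f for cpu0, matching the two-threads-
per-core, four-cpus-per-package layout chosen above.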

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 40a3b56952ef3373272eaa40a8f1631a8b39ca10..d774520a6b48ec5a265a38839b19b2baf8752bf3 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -29,7 +29,95 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 /* Per CPU bogomips and other parameters */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
+
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
+void __cpuinit set_cpu_sibling_map(int cpu)
+{
+       int i;
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+       cpu_set(cpu, cpu_sibling_setup_map);
+
+       if (smp_num_siblings > 1) {
+               for_each_cpu_mask(i, cpu_sibling_setup_map) {
+                       if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
+                           c->cpu_core_id == cpu_data(i).cpu_core_id) {
+                               cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+                               cpu_set(cpu, per_cpu(cpu_sibling_map, i));
+                               cpu_set(i, per_cpu(cpu_core_map, cpu));
+                               cpu_set(cpu, per_cpu(cpu_core_map, i));
+                               cpu_set(i, c->llc_shared_map);
+                               cpu_set(cpu, cpu_data(i).llc_shared_map);
+                       }
+               }
+       } else {
+               cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
+       }
+
+       cpu_set(cpu, c->llc_shared_map);
+
+       if (current_cpu_data.x86_max_cores == 1) {
+               per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
+               c->booted_cores = 1;
+               return;
+       }
+
+       for_each_cpu_mask(i, cpu_sibling_setup_map) {
+               if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
+                   per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
+                       cpu_set(i, c->llc_shared_map);
+                       cpu_set(cpu, cpu_data(i).llc_shared_map);
+               }
+               if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
+                       cpu_set(i, per_cpu(cpu_core_map, cpu));
+                       cpu_set(cpu, per_cpu(cpu_core_map, i));
+                       /*
+                        *  Does this new cpu bringup a new core?
+                        */
+                       if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
+                               /*
+                                * for each core in package, increment
+                                * the booted_cores for this new cpu
+                                */
+                               if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
+                                       c->booted_cores++;
+                               /*
+                                * increment the core count for all
+                                * the other cpus in this package
+                                */
+                               if (i != cpu)
+                                       cpu_data(i).booted_cores++;
+                       } else if (i != cpu && !c->booted_cores)
+                               c->booted_cores = cpu_data(i).booted_cores;
+               }
+       }
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
+void remove_siblinginfo(int cpu)
+{
+       int sibling;
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+       for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+               cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+               /*/
+                * last thread sibling in this cpu core going down
+                */
+               if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
+                       cpu_data(sibling).booted_cores--;
+       }
+
+       for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+               cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+       cpus_clear(per_cpu(cpu_sibling_map, cpu));
+       cpus_clear(per_cpu(cpu_core_map, cpu));
+       c->phys_proc_id = 0;
+       c->cpu_core_id = 0;
+       cpu_clear(cpu, cpu_sibling_setup_map);
+}
 
 int additional_cpus __initdata = -1;
 
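Since the definitions now live only in smpboot.c while callers such as
__cpu_disable() remain in smpboot_32.c and smpboot_64.c (visible in the
hunks below), both functions still need externally visible prototypes. A
minimal sketch of what such declarations look like; the signatures are
taken from the code above, but the header they would land in (for example
include/asm-x86/smp.h) is an assumption, as no header change is shown in
this diff.

/* Hypothetical placement, e.g. include/asm-x86/smp.h; only the
 * signatures below are taken from the functions moved above. */
extern void set_cpu_sibling_map(int cpu);
extern void remove_siblinginfo(int cpu);
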
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 0fbc98163b4ebf3fc86d543485df2c8d3cb25166..322f46674d427aa37be02af294637a29edf2be53 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -274,71 +274,6 @@ cpumask_t cpu_coregroup_map(int cpu)
                return c->llc_shared_map;
 }
 
-/* representing cpus for which sibling maps can be computed */
-static cpumask_t cpu_sibling_setup_map;
-
-void __cpuinit set_cpu_sibling_map(int cpu)
-{
-       int i;
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-       cpu_set(cpu, cpu_sibling_setup_map);
-
-       if (smp_num_siblings > 1) {
-               for_each_cpu_mask(i, cpu_sibling_setup_map) {
-                       if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
-                           c->cpu_core_id == cpu_data(i).cpu_core_id) {
-                               cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-                               cpu_set(cpu, per_cpu(cpu_sibling_map, i));
-                               cpu_set(i, per_cpu(cpu_core_map, cpu));
-                               cpu_set(cpu, per_cpu(cpu_core_map, i));
-                               cpu_set(i, c->llc_shared_map);
-                               cpu_set(cpu, cpu_data(i).llc_shared_map);
-                       }
-               }
-       } else {
-               cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-       }
-
-       cpu_set(cpu, c->llc_shared_map);
-
-       if (current_cpu_data.x86_max_cores == 1) {
-               per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
-               c->booted_cores = 1;
-               return;
-       }
-
-       for_each_cpu_mask(i, cpu_sibling_setup_map) {
-               if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
-                   per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-                       cpu_set(i, c->llc_shared_map);
-                       cpu_set(cpu, cpu_data(i).llc_shared_map);
-               }
-               if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
-                       cpu_set(i, per_cpu(cpu_core_map, cpu));
-                       cpu_set(cpu, per_cpu(cpu_core_map, i));
-                       /*
-                        *  Does this new cpu bringup a new core?
-                        */
-                       if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
-                               /*
-                                * for each core in package, increment
-                                * the booted_cores for this new cpu
-                                */
-                               if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
-                                       c->booted_cores++;
-                               /*
-                                * increment the core count for all
-                                * the other cpus in this package
-                                */
-                               if (i != cpu)
-                                       cpu_data(i).booted_cores++;
-                       } else if (i != cpu && !c->booted_cores)
-                               c->booted_cores = cpu_data(i).booted_cores;
-               }
-       }
-}
-
 /*
  * Activate a secondary processor.
  */
@@ -1120,29 +1055,6 @@ void __init native_smp_prepare_boot_cpu(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void remove_siblinginfo(int cpu)
-{
-       int sibling;
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-       for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
-               cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
-               /*/
-                * last thread sibling in this cpu core going down
-                */
-               if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-                       cpu_data(sibling).booted_cores--;
-       }
-                       
-       for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
-               cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-       cpus_clear(per_cpu(cpu_sibling_map, cpu));
-       cpus_clear(per_cpu(cpu_core_map, cpu));
-       c->phys_proc_id = 0;
-       c->cpu_core_id = 0;
-       cpu_clear(cpu, cpu_sibling_setup_map);
-}
-
 int __cpu_disable(void)
 {
        cpumask_t map = cpu_online_map;
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 20f1c7df86a3310b9eb40c1a888767358af530c8..329f9c53a335b4867c33f736a6c593b41b301ec7 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -225,71 +225,6 @@ cpumask_t cpu_coregroup_map(int cpu)
                return c->llc_shared_map;
 }
 
-/* representing cpus for which sibling maps can be computed */
-static cpumask_t cpu_sibling_setup_map;
-
-void __cpuinit set_cpu_sibling_map(int cpu)
-{
-       int i;
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-       cpu_set(cpu, cpu_sibling_setup_map);
-
-       if (smp_num_siblings > 1) {
-               for_each_cpu_mask(i, cpu_sibling_setup_map) {
-                       if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
-                           c->cpu_core_id == cpu_data(i).cpu_core_id) {
-                               cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-                               cpu_set(cpu, per_cpu(cpu_sibling_map, i));
-                               cpu_set(i, per_cpu(cpu_core_map, cpu));
-                               cpu_set(cpu, per_cpu(cpu_core_map, i));
-                               cpu_set(i, c->llc_shared_map);
-                               cpu_set(cpu, cpu_data(i).llc_shared_map);
-                       }
-               }
-       } else {
-               cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-       }
-
-       cpu_set(cpu, c->llc_shared_map);
-
-       if (current_cpu_data.x86_max_cores == 1) {
-               per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
-               c->booted_cores = 1;
-               return;
-       }
-
-       for_each_cpu_mask(i, cpu_sibling_setup_map) {
-               if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
-                   per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-                       cpu_set(i, c->llc_shared_map);
-                       cpu_set(cpu, cpu_data(i).llc_shared_map);
-               }
-               if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
-                       cpu_set(i, per_cpu(cpu_core_map, cpu));
-                       cpu_set(cpu, per_cpu(cpu_core_map, i));
-                       /*
-                        *  Does this new cpu bringup a new core?
-                        */
-                       if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
-                               /*
-                                * for each core in package, increment
-                                * the booted_cores for this new cpu
-                                */
-                               if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
-                                       c->booted_cores++;
-                               /*
-                                * increment the core count for all
-                                * the other cpus in this package
-                                */
-                               if (i != cpu)
-                                       cpu_data(i).booted_cores++;
-                       } else if (i != cpu && !c->booted_cores)
-                               c->booted_cores = cpu_data(i).booted_cores;
-               }
-       }
-}
-
 /*
  * Setup code on secondary processor (after comming out of the trampoline)
  */
@@ -917,30 +852,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-
-void remove_siblinginfo(int cpu)
-{
-       int sibling;
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-       for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
-               cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
-               /*
-                * last thread sibling in this cpu core going down
-                */
-               if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-                       cpu_data(sibling).booted_cores--;
-       }
-                       
-       for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
-               cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-       cpus_clear(per_cpu(cpu_sibling_map, cpu));
-       cpus_clear(per_cpu(cpu_core_map, cpu));
-       c->phys_proc_id = 0;
-       c->cpu_core_id = 0;
-       cpu_clear(cpu, cpu_sibling_setup_map);
-}
-
 static void __ref remove_cpu_from_maps(void)
 {
        int cpu = smp_processor_id();