/*
 * arch/x86/kernel/smpboot.c
 *
 * x86: move sibling functions to common file
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
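
/*
 * Illustrative example (not in the original source): on a hypothetical
 * system with two packages, each with two cores of two HT threads
 * (CPUs 0-3 in package 0, CPUs 4-7 in package 1), once all CPUs are
 * set up:
 *
 *   per_cpu(cpu_sibling_map, 0) == { 0, 1 }        HT threads of core 0
 *   per_cpu(cpu_core_map, 0)    == { 0, 1, 2, 3 }  all CPUs in package 0
 */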

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in the package, increment
				 * booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
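
/*
 * Worked example (illustrative, not in the original source): on the
 * hypothetical two-thread-per-core package above, bringing CPU 1 up
 * after CPU 0 adds each to the other's sibling and core maps. Since
 * CPU 1 is not the first thread of its core, it inherits booted_cores
 * from CPU 0 in the else-if branch rather than incrementing it.
 */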

#ifdef CONFIG_HOTPLUG_CPU
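/*
 * Undo the bookkeeping done by set_cpu_sibling_map() when a CPU goes
 * offline: remove the cpu from its siblings' maps and, if it was the
 * last thread of its core, decrement booted_cores on its siblings.
 */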
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * the last thread sibling in this cpu core is going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);
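
/*
 * Usage example (illustrative): booting with "additional_cpus=2" on
 * the kernel command line reserves two extra slots in cpu_possible_map
 * beyond the processors enumerated at boot, so that many CPUs can be
 * hot-added later.
 */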

/*
 * cpu_possible_map should be static: it cannot change as CPUs are
 * onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect
 * to do this dynamically on cpu arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When CPU hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
 * - The user can override it with additional_cpus=NUM.
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
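
/*
 * Worked example (illustrative): with 4 processors enumerated at boot,
 * 2 CPUs marked disabled by the BIOS, and no additional_cpus= option
 * on the command line, additional_cpus becomes 2 and cpu_possible_map
 * is filled with 6 CPUs (assuming NR_CPUS >= 6).
 */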
#endif /* CONFIG_HOTPLUG_CPU */