#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);
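
/*
 * Bringup handshake masks: the boot CPU marks a secondary CPU in
 * cpu_callout_map to let it continue booting; the secondary then
 * reports in by marking itself in cpu_callin_map.
 */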
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
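
/*
 * Record the topology of @cpu: mark its hyperthread siblings in
 * cpu_sibling_map, its package siblings in cpu_core_map, the CPUs
 * sharing its last-level cache in llc_shared_map, and keep the
 * per-package booted_cores count up to date.
 */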
void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}

#ifdef CONFIG_HOTPLUG_CPU
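/*
 * Undo set_cpu_sibling_map() when @cpu goes offline: remove @cpu from
 * every sibling and core mask that references it, and decrement
 * booted_cores on the remaining CPUs in the package when the last
 * thread of a core goes down.
 */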
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}
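
/*
 * How many hotplug CPUs to reserve beyond those enumerated at boot.
 * -1 means "not set on the command line"; prefill_possible_map() then
 * falls back to the BIOS disabled-CPU count.
 */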
int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);

/*
 * cpu_possible_map should be static: it cannot change as CPUs are
 * onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect
 * to do this dynamically on CPU arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When CPU hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
 * - The user can overwrite it with additional_cpus=NUM.
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
	       possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
#endif /* CONFIG_HOTPLUG_CPU */