x86: restore pda nodenumber field
[linux-2.6] arch/x86/kernel/setup.c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
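/*
 * Rough sketch of the intended access pattern for the maps above,
 * assuming the early_per_cpu() helpers from <asm/percpu.h>: the
 * accessor reads the static bootstrap array while early_per_cpu_ptr()
 * is still non-NULL, and the real per-cpu variable once
 * setup_per_cpu_maps() below has copied the data and zeroed the
 * pointer:
 *
 *	u16 apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 */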

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA	1

/* map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}
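/*
 * Until setup_per_cpu_maps() has run, readers must test
 * early_per_cpu_ptr() for NULL to pick the right copy; see
 * early_cpu_to_node() at the bottom of this file for that pattern.
 */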

#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
cpumask_t *cpumask_of_cpu_map __read_mostly;
EXPORT_SYMBOL(cpumask_of_cpu_map);

/* requires nr_cpu_ids to be initialized */
static void __init setup_cpumask_of_cpu(void)
{
	int i;

	/* alloc_bootmem zeroes memory */
	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
	for (i = 0; i < nr_cpu_ids; i++)
		cpu_set(i, cpumask_of_cpu_map[i]);
}
#else
static inline void setup_cpumask_of_cpu(void) { }
#endif
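/*
 * Each entry of cpumask_of_cpu_map ends up with exactly one bit set,
 * so a cpumask_of_cpu(cpu) built on top of it can hand out a pointer
 * into this array rather than constructing a one-bit mask on the
 * stack.  Hypothetical caller, for illustration only:
 *
 *	const cpumask_t *mask = &cpumask_of_cpu_map[cpu];
 *	// same contents as the classic cpumask_of_cpu(cpu) value,
 *	// but no NR_CPUS-bit copy is made
 */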

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
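/*
 * After setup_per_cpu_areas() below, a per_cpu(var, cpu) access
 * resolves -- roughly, not the literal macro expansion -- to the
 * address of var in the original section plus the per-CPU offset
 * recorded here (cpu_pda(cpu)->data_offset on 64-bit,
 * __per_cpu_offset[cpu] on 32-bit):
 *
 *	ptr_for_cpu = &var + per_cpu_offset(cpu);
 */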
void __init setup_per_cpu_areas(void)
{
	int i, highest_cpu = 0;
	unsigned long size;

#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#endif

	/* Copy section for each CPU (we discard the original) */
	size = PERCPU_ENOUGH_ROOM;
	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
			  size);

	for_each_possible_cpu(i) {
		char *ptr;
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(i);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			printk(KERN_INFO
			       "cpu %d has no node %d or node-local memory\n",
				i, node);
		} else
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);
#ifdef CONFIG_X86_64
		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
#else
		__per_cpu_offset[i] = ptr - __per_cpu_start;
#endif
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

		highest_cpu = i;
	}

	nr_cpu_ids = highest_cpu + 1;
	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d\n", NR_CPUS, nr_cpu_ids);

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup cpumask_of_cpu map */
	setup_cpumask_of_cpu();
}

#endif

#ifdef X86_64_NUMA
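/*
 * numa_set_node() must work at several boot stages, so it tries its
 * targets in order: the early bootstrap array while it still exists,
 * then the CPU's per-cpu variable once that CPU's area is set up, and
 * otherwise it can only log, as there is nowhere yet to record the
 * mapping for a not-yet-present CPU.
 */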
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	if (node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;

	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;

	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;

	else
		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}
#endif	/* X86_64_NUMA */

#if defined(CONFIG_DEBUG_PER_CPU_MAPS) && defined(CONFIG_X86_64)

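/*
 * Out-of-line, checked variants of cpu_to_node()/early_cpu_to_node().
 * With CONFIG_DEBUG_PER_CPU_MAPS these catch a call made too early --
 * before the early map has been copied into the per-cpu areas -- and
 * report it with a stack dump instead of silently reading an
 * uninitialized per-cpu variable.
 */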
int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
#endif