/*
 * x86 SMP booting functions
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 * Copyright 2001 Andi Kleen, SuSE Labs.
 *
 * Much of the core SMP work is based on previous work by Thomas Radke, to
 * whom a great many thanks are extended.
 *
 * Thanks to Intel for making available several different Pentium,
 * Pentium Pro and Pentium-II/Xeon MP machines.
 * Original development of Linux SMP code supported by Caldera.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 *
 * Fixes
 *	Felix Koop	:	NR_CPUS used properly
 *	Jose Renau	:	Handle single CPU case.
 *	Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *	Greg Wright	:	Fix for kernel stacks panic.
 *	Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *	Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *	Ingo Molnar	:	Added APIC timers, based on code
 *				from Jose Renau
 *	Ingo Molnar	:	various cleanups and rewrites
 *	Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *	Andi Kleen	:	Changed for SMP boot into long mode.
 *	Martin J. Bligh	:	Added support for multi-quad systems
 *	Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *	Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *	Andi Kleen	:	Converted to new state machine.
 *	Ashok Raj	:	CPU hotplug support
 *	Glauber Costa	:	i386 and x86_64 integration
 */
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/trampoline.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <linux/mc146818rtc.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
/*
 * FIXME: For x86_64, those are defined in other files. But moving them here,
 * would make the setup areas dependent on smp, which is a loss. When we
 * integrate apic between arches, we can probably do a better job, but
 * right now, they'll stay here -- glommer
 */
/* which logical CPU number maps to which CPU (physical APIC ID) */
u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
			{ [0 ... NR_CPUS-1] = BAD_APICID };
void *x86_cpu_to_apicid_early_ptr;
DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);

u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata
			= { [0 ... NR_CPUS-1] = BAD_APICID };
void *x86_bios_cpu_apicid_early_ptr;
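
/*
 * The *_early_ptr variables above let early boot code fill these
 * tables before the per-cpu areas exist; once per-cpu data is up,
 * the contents are copied into the per-cpu variables and the early
 * pointers are discarded.
 */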
u8 apicid_2_node[MAX_APICID];

/* Internal processor count */
unsigned int num_processors;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

unsigned disabled_cpus __cpuinitdata;
/* Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
#else
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
#endif
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
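
/*
 * Set once the INIT-deassert IPI has been observed; smp_callin()
 * waits on this before touching the local APIC on 82489DX systems.
 */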
static atomic_t init_deasserted;

static int boot_cpu_logical_apicid;

/* ready for x86_64, no harm for x86, since it will overwrite after alloc */
unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* Set if we find a B stepping CPU */
int __cpuinitdata smp_b_stepping;
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)

/* which logical CPUs are on which nodes */
cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(node_to_cpumask_map);
/* which node each logical CPU is on */
int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_to_node_map);

/* set up a mapping between cpu and node. */
static void map_cpu_to_node(int cpu, int node)
{
	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
	cpu_set(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = node;
}

/* undo a mapping between cpu and node. */
static void unmap_cpu_to_node(int cpu)
{
	int node;

	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpu_clear(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = 0;
}
#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
#define map_cpu_to_node(cpu, node)	({})
#define unmap_cpu_to_node(cpu)	({})
#endif
#ifdef CONFIG_X86_32
u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
				{ [0 ... NR_CPUS-1] = BAD_APICID };

void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();
	int node = apicid_to_node(apicid);

	if (!node_online(node))
		node = first_online_node;

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, node);
}

void unmap_cpu_to_logical_apicid(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}
#else
#define unmap_cpu_to_logical_apicid(cpu) do {} while (0)
#define map_cpu_to_logical_apicid() do {} while (0)
#endif
/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;
	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);
	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(read_apic_id());
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
					phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second; this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("%s: CPU%d started up but did not get a callout!\n",
		      __func__, cpuid);
	}
	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();
	map_cpu_to_logical_apicid();
	/*
	 * Get our bogomips.
	 *
	 * Need to enable IRQs because it can take longer and then
	 * the NMI watchdog might kill us.
	 */
	local_irq_enable();
	calibrate_delay();
	local_irq_disable();
	Dprintk("Stack at about %p\n", &cpuid);

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}
/*
 * Activate a secondary processor.
 */
void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(), SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
#ifdef CONFIG_VMI
	vmi_bringup();
#endif
	cpu_init();
	preempt_disable();
	smp_callin();

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0();
		enable_8259A_irq(0);
	}
	/* This must be done before setting cpu_online_map */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();
	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines the number of
	 * IPI recipients and the time when it is determined which cpus
	 * receive the IPI. Holding this lock helps us to not include
	 * this cpu in a currently in progress smp_call_function().
	 */
	lock_ipi_call_lock();
	spin_lock(&vector_lock);

	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(smp_processor_id());
	/*
	 * Allow the master to continue.
	 */
	spin_unlock(&vector_lock);
	cpu_set(smp_processor_id(), cpu_online_map);
	unlock_ipi_call_lock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

	setup_secondary_clock();

	wmb();
	cpu_idle();
}
#ifdef CONFIG_X86_32
/*
 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure.
 * This function must not return.
 */
void __devinit initialize_secondary(void)
{
	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the ip.
	 */
	asm volatile(
		"movl %0,%%esp\n\t"
		"jmp *%1"
		:
		:"m" (current->thread.sp), "m" (current->thread.ip));
}
#endif
static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		if (num_possible_cpus() == 1)
			goto valid_k7;

		/* Athlon 660/661 is valid. */
		if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
		    (c->x86_mask == 1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model == 7) && (c->x86_mask == 0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon >model 7 have the MP
		 * capability bit. It's worth noting that the A5 stepping
		 * (662) of some Athlon XP's have the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000
		 * for more.
		 */
		if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
		    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
		     (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, not a certified SMP capable AMD system. */
		add_taint(TAINT_UNSAFE_SMP);
	}

valid_k7:
	;
#endif
}
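
/*
 * Print the warnings collected by smp_apply_quirks() once all CPUs
 * have been brought up.
 */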
void smp_checks(void)
{
	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable "
				    "with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (num_online_cpus())
			printk(KERN_INFO "WARNING: This combination of AMD "
				"processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}
}
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */
void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	if (id != 0)
		identify_secondary_cpu(c);
	smp_apply_quirks(c);
}
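
/*
 * Record @cpu in cpu_sibling_setup_map and build its HT-sibling,
 * core-sibling and last-level-cache sharing masks against every CPU
 * already there, keeping booted_cores consistent along the way.
 */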
void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bringup a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For perf, we return last level cache shared map.
	 * And for power savings, we return cpu_core_map
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return per_cpu(cpu_core_map, cpu);
	else
		return c->llc_shared_map;
}
/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

unsigned long __cpuinit setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data,
	       trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
}
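
/*
 * Sum the loops_per_jiffy of every CPU that checked in and report
 * the total BogoMIPS in the boot log.
 */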
void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for_each_possible_cpu(cpu)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	Dprintk("Before bogocount - setting activated=1.\n");
}
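
/*
 * Debug helper: use the APIC remote-read facility to dump the ID,
 * VERSION and SPIV registers of another CPU's (possibly wedged) APIC.
 */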
static inline void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			printk(KERN_CONT
			       "a previous APIC delivery may have failed\n");

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk(KERN_CONT "%08x\n", status);
			break;
		default:
			printk(KERN_CONT "failed\n");
		}
	}
}
#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the
 * normal INIT, INIT, STARTUP sequence will reset the chip hard for us, and
 * this won't ... remember to clear down the APIC, etc later.
 */
static int __devinit
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

	/* Boot on the stack */
	/* Kick the second */
	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	/*
	 * Due to the Pentium erratum 3AP.
	 */
	maxlvt = lapic_get_maxlvt();
	if (maxlvt > 3) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	Dprintk("NMI sent.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_NMI */
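
/*
 * Wake the AP with the INIT, INIT, STARTUP sequence from the Intel
 * MultiProcessor spec. Integrated APICs also get the (up to two)
 * STARTUP IPIs; external 82489DX APICs get INIT only.
 */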
#ifdef WAKE_SECONDARY_VIA_INIT
static int __devinit
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt, num_starts, j;

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();
	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Paravirt / VMI wants a startup IPI hook here to set up the
	 * target processor state.
	 */
	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
#ifdef CONFIG_X86_64
			 (unsigned long)init_rsp);
#else
			 (unsigned long)stack_start.sp);
#endif

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = lapic_get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_eip >> 12));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_INIT */
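
/*
 * State handed to do_fork_idle(), which runs off the keventd
 * workqueue during CPU hot-add (see do_boot_cpu()).
 */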
struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}
static int __cpuinit do_boot_cpu(int apicid, int cpu)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
	unsigned long boot_error = 0;
	int timeout;
	unsigned long start_ip;
	unsigned short nmi_high = 0, nmi_low = 0;
	struct create_idle c_idle = {
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};
	INIT_WORK(&c_idle.work, do_fork_idle);
#ifdef CONFIG_X86_32
	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
	if (!cpu_gdt_descr[cpu].address &&
		!(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
		return -1;
	}
#else
	/* Allocate node local memory for AP pdas */
	if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
		struct x8664_pda *newpda, *pda;
		int node = cpu_to_node(cpu);
		pda = cpu_pda(cpu);
		newpda = kmalloc_node(sizeof(struct x8664_pda), GFP_ATOMIC,
				      node);
		if (newpda) {
			memcpy(newpda, pda, sizeof(struct x8664_pda));
			cpu_pda(cpu) = newpda;
		} else
			printk(KERN_ERR
		"Could not allocate node local PDA for CPU %d on node %d\n",
				cpu, node);
	}
#endif
	alternatives_smp_switch(1);

	c_idle.idle = get_idle_for_cpu(cpu);

	/*
	 * We can't use kernel_thread since we must avoid
	 * rescheduling the child.
	 */
	if (c_idle.idle) {
		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk(KERN_ERR "failed fork for CPU %d\n", cpu);
		return PTR_ERR(c_idle.idle);
	}

	set_idle_for_cpu(cpu, c_idle.idle);
do_rest:
#ifdef CONFIG_X86_32
	per_cpu(current_task, cpu) = c_idle.idle;
	init_gdt(cpu);
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	c_idle.idle->thread.ip = (unsigned long) start_secondary;
	/* Stack for startup_32 can be just as for start_secondary onwards */
	stack_start.sp = (void *) c_idle.idle->thread.sp;
	irq_ctx_init(cpu);
#else
	cpu_pda(cpu)->pcurrent = c_idle.idle;
	init_rsp = c_idle.idle->thread.sp;
	load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread);
	initial_code = (unsigned long)start_secondary;
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
#endif
	/* start_ip had better be page-aligned! */
	start_ip = setup_trampoline();

	/* So we see what's up */
	printk(KERN_INFO "Booting processor %d/%d ip %lx\n",
	       cpu, apicid, start_ip);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	store_NMI_vector(&nmi_high, &nmi_low);

	smpboot_setup_warm_reset_vector(start_ip);
	/*
	 * Be paranoid about clearing APIC errors.
	 */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_cpu(apicid, start_ip);
	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			printk(KERN_INFO "CPU%d: ", cpu);
			print_cpu_info(&cpu_data(cpu));
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk(KERN_ERR "Stuck ??\n");
			else
				/* trampoline code not run */
				printk(KERN_ERR "Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}
	if (boot_error) {
		/* Try to put things back the way they were before ... */
		unmap_cpu_to_logical_apicid(cpu);
#ifdef CONFIG_X86_64
		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
#endif
		cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
		cpu_clear(cpu, cpu_possible_map);
		cpu_clear(cpu, cpu_present_map);
		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	return boot_error;
}
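
/*
 * Bring one AP online: validate its APIC ID, kick it with
 * do_boot_cpu() and wait until it appears in cpu_online_map.
 */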
int __cpuinit native_cpu_up(unsigned int cpu)
{
	int apicid = cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpu_isset(cpu, cpu_callin_map)) {
		Dprintk("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

#ifdef CONFIG_X86_32
	/* init low mem mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
	flush_tlb_all();
#endif

	err = do_boot_cpu(apicid, cpu);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_isset(cpu, cpu_online_map)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

	return 0;
}
/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	cpu_present_map = cpumask_of_cpu(0);
	cpu_possible_map = cpumask_of_cpu(0);
#ifdef CONFIG_X86_32
	smpboot_clear_io_apic_irqs();
#endif
	if (smp_found_config)
		phys_cpu_present_map =
				physid_mask_of_physid(boot_cpu_physical_apicid);
	else
		phys_cpu_present_map = physid_mask_of_physid(0);
	map_cpu_to_logical_apicid();
	cpu_set(0, per_cpu(cpu_sibling_map, 0));
	cpu_set(0, per_cpu(cpu_core_map, 0));
}
/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	preempt_disable();
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk(KERN_WARNING "weird, boot CPU (#%d) not listed "
				    "by the BIOS.\n", hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		preempt_enable();
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk(KERN_NOTICE
			"weird, boot CPU (#%d) not listed by the BIOS.\n",
			boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}
	preempt_enable();

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
	    !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. "
				"(tell your hw vendor)\n");
		smpboot_clear_io_apic();
		return -1;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated, "
				 "forcing use of dummy APIC emulation.\n");
		smpboot_clear_io_apic();
#ifdef CONFIG_X86_32
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			printk(KERN_INFO "activating minimal APIC for "
					 "NMI watchdog use.\n");
			connect_bsp_APIC();
			setup_local_APIC();
			end_local_APIC_setup();
		}
#endif
		return -1;
	}

	return 0;
}
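
/*
 * Reset every possible CPU's cpu_index to "not yet booted" (NR_CPUS)
 * before bringup; each CPU fills in its real index as it comes up.
 */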
static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_cpu_mask(i, cpu_possible_map) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = NR_CPUS;
	}
}
/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	nmi_watchdog_default();
	smp_cpu_index_default();
	current_cpu_data = boot_cpu_data;
	cpu_callin_map = cpumask_of_cpu(0);
	mb();
	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	boot_cpu_logical_apicid = logical_smp_processor_id();
	current_thread_info()->cpu = 0;  /* needed? */
	set_cpu_sibling_map(0);

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
		disable_smp();
		return;
	}

	preempt_disable();
	if (GET_APIC_ID(read_apic_id()) != boot_cpu_physical_apicid) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		      GET_APIC_ID(read_apic_id()), boot_cpu_physical_apicid);
		/* Or can we switch back to PIC here? */
	}
	preempt_enable();

#ifdef CONFIG_X86_32
	connect_bsp_APIC();
#endif
	/*
	 * Switch from PIC to APIC mode.
	 */
	setup_local_APIC();

#ifdef CONFIG_X86_64
	/*
	 * Enable IO APIC before setting up error vector
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		enable_IO_APIC();
#endif
	end_local_APIC_setup();

	map_cpu_to_logical_apicid();

	setup_portio_remap();

	smpboot_setup_io_apic();
	/*
	 * Set up local APIC timer on boot CPU.
	 */

	printk(KERN_INFO "CPU%d: ", 0);
	print_cpu_info(&cpu_data(0));
	setup_boot_clock();
}
/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
#ifdef CONFIG_X86_32
	init_gdt(me);
	switch_to_new_gdt();
#endif
	/* already set me in cpu_online_map in boot_cpu_init() */
	cpu_set(me, cpu_callout_map);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}
void __init native_smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	Dprintk("Boot done.\n");

	impress_friends();
	smp_checks();
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	check_nmi_watchdog();
#ifdef CONFIG_X86_32
	zap_low_mappings();
#endif
}
#ifdef CONFIG_HOTPLUG_CPU

# ifdef CONFIG_X86_32
void cpu_exit_clear(void)
{
	int cpu = raw_smp_processor_id();

	idle_task_exit();

	cpu_uninit();
	irq_ctx_exit(cpu);

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);

	unmap_cpu_to_logical_apicid(cpu);
}
# endif /* CONFIG_X86_32 */
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}
int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);
/*
 * cpu_possible_map should be static: it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect
 * to do this dynamically on cpu arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
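
/* Drop an offlined CPU from all bookkeeping cpumasks. */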
static void __ref remove_cpu_from_maps(int cpu)
{
	cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_X86_64
	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	/* was set by cpu_init() */
	clear_bit(cpu, (unsigned long *)&cpu_initialized);
	clear_node_cpumask(cpu);
#endif
}
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();

	/*
	 * HACK:
	 * Allow any queued timer interrupts to get serviced
	 * This is only a temporary solution until we cleanup
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	remove_cpu_from_maps(cpu);
	fixup_irqs(cpu_online_map);
	return 0;
}
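
/*
 * Runs on the CPU coordinating the unplug: poll (for up to about a
 * second) for the dying CPU to mark itself CPU_DEAD in play_dead().
 */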
void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk(KERN_INFO "CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
	extern unsigned int maxcpus;

	maxcpus = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("maxcpus", parse_maxcpus);