/*
 * x86 SMP booting functions
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 * Copyright 2001 Andi Kleen, SuSE Labs.
 *
 * Much of the core SMP work is based on previous work by Thomas Radke, to
 * whom a great many thanks are extended.
 *
 * Thanks to Intel for making available several different Pentium,
 * Pentium Pro and Pentium-II/Xeon MP machines.
 * Original development of Linux SMP code supported by Caldera.
 *
 * This code is released under the GNU General Public License version 2
 *
 *	Felix Koop	:	NR_CPUS used properly
 *	Jose Renau	:	Handle single CPU case.
 *	Alan Cox	:	By repeated request 8) - Total BogoMIP report.
 *	Greg Wright	:	Fix for kernel stacks panic.
 *	Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler:	Changes for 2.1 kernel map.
 *	Michel Lespinasse:	Changes for 2.1 kernel map.
 *	Michael Chastain:	Change trampoline.S to gnu as.
 *	Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *	Ingo Molnar	:	Added APIC timers, based on code
 *	Ingo Molnar	:	various cleanups and rewrites
 *	Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki:	Bits for genuine 82489DX APICs
 *	Andi Kleen	:	Changed for SMP boot into long mode.
 *	Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *	Andi Kleen	:	Converted to new state machine.
 *				Probably mostly hotplug CPU ready now.
 *	Ashok Raj	:	CPU hotplug support
 */

#include <linux/init.h>

#include <linux/kernel_stat.h>
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/smp.h>
#include <linux/kdebug.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/hw_irq.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;

/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

/*
 * Private maps to synchronize booting between AP and BP.
 * Probably not needed anymore, but it makes for easier debugging. -AK
 */
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* Set when the idlers are all forked */
int smp_threads_ready;

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/*
 * Trampoline 80x86 program as an array.
 */
extern const unsigned char trampoline_data[];
extern const unsigned char trampoline_end[];

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/*
 * Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality.
 */
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;

#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
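
/*
 * Usage sketch: do_boot_cpu() below first tries get_idle_for_cpu(); only
 * when that returns NULL does it fork_idle() a fresh task and cache it
 * with set_idle_for_cpu(), so a re-onlined CPU reuses its old idle thread.
 */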

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
static unsigned long __cpuinit setup_trampoline(void)
{
	void *tramp = __va(SMP_TRAMPOLINE_BASE);
	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(tramp);
}
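
/*
 * Note on placement: the AP begins execution in real mode at the 4 KB
 * page named by the STARTUP IPI vector, so SMP_TRAMPOLINE_BASE must be a
 * page-aligned physical address below 1 MB.
 */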

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */
static void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	identify_cpu(c);
	print_cpu_info(c);
}

static atomic_t init_deasserted __cpuinitdata;

/*
 * Report back to the Boot Processor.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	while (!atomic_read(&init_deasserted))
		cpu_relax();

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
			phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("smp_callin: CPU%d started up but did not get a callout!\n",
			cpuid);
	}

	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */
	Dprintk("CALLIN, before setup_local_APIC().\n");
	setup_local_APIC();

	/*
	 * Get our bogomips. Need to enable IRQs because the calibration
	 * can take longer, and the NMI watchdog might kill us otherwise.
	 */
	local_irq_enable();
	calibrate_delay();
	local_irq_disable();
	Dprintk("Stack at about %p\n", &cpuid);

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For perf, we return the last-level-cache shared map;
	 * for power savings, we return cpu_core_map.
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return per_cpu(cpu_core_map, cpu);

	return c->llc_shared_map;
}

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

static inline void set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
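
/*
 * Illustrative example: on a single-socket, dual-core, HT-enabled system
 * (assuming CPUs 0 and 1 are the two threads of core 0), after all four
 * logical CPUs have run set_cpu_sibling_map(), cpu_sibling_map(0) is
 * {0, 1}, cpu_core_map(0) is {0, 1, 2, 3} (all logical CPUs in the
 * package), and booted_cores is 2 for each cpuinfo in the package.
 */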

/*
 * Setup code on secondary processor (after coming out of the trampoline)
 */
void __cpuinit start_secondary(void)
{
	/*
	 * Don't put anything before smp_callin(); SMP booting is fragile
	 * enough that we want to limit the things done here to the most
	 * necessary things.
	 */
	cpu_init();
	preempt_disable();
	smp_callin();

	/* otherwise gcc will move up smp_processor_id() before cpu_init() */
	barrier();

	/*
	 * Check TSC sync first:
	 */
	check_tsc_sync_target();

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}

	/*
	 * The sibling maps must be set before turning the online map on
	 * for this cpu.
	 */
	set_cpu_sibling_map(smp_processor_id());

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines the number of
	 * IPI recipients and the time when the determination is made
	 * for which cpus receive the IPI in genapic_flat.c. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	lock_ipi_call_lock();
	spin_lock(&vector_lock);

	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(smp_processor_id());
	/*
	 * Allow the master to continue.
	 */
	cpu_set(smp_processor_id(), cpu_online_map);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	spin_unlock(&vector_lock);
	unlock_ipi_call_lock();

	setup_secondary_APIC_clock();

	cpu_idle();
}

extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);

static void inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			printk(KERN_CONT
			       "a previous APIC delivery may have failed\n");

		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
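
		/*
		 * A remote read asks the target APIC to deposit the
		 * requested register in the local remote-read register;
		 * the bounded poll above waits for the transaction to
		 * leave the "in progress" state before the result is
		 * examined below.
		 */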
		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk(KERN_CONT "%08x\n", status);
			break;
		default:
			printk(KERN_CONT "failed\n");
		}
	}
}

/*
 * Kick the secondary to wake up.
 */
static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt, num_starts, j;

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	atomic_set(&init_deasserted, 1);

	num_starts = 2;

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = lapic_get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12));
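
		/*
		 * Vector encoding: the STARTUP IPI carries the physical
		 * page number of the real-mode entry point, hence
		 * start_rip >> 12; the AP starts at CS = vector << 8,
		 * IP = 0, i.e. physical address vector * 4096.
		 */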

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);

		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}

struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

/*
 * Boot one CPU.
 */
static int __cpuinit do_boot_cpu(int cpu, int apicid)
{
	unsigned long boot_error;
	int timeout;
	unsigned long start_rip;
	struct create_idle c_idle = {
		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};

	/* Allocate memory for the GDTs of secondary CPUs; hotplug is
	 * taken into account. */
	if (!cpu_gdt_descr[cpu].address &&
	    !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
		return -1;
	}

	/* Allocate node local memory for AP pdas */
	if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
		struct x8664_pda *newpda, *pda;
		int node = cpu_to_node(cpu);
		pda = cpu_pda(cpu);
		newpda = kmalloc_node(sizeof(struct x8664_pda), GFP_ATOMIC,
				      node);
		if (newpda) {
			memcpy(newpda, pda, sizeof(struct x8664_pda));
			cpu_pda(cpu) = newpda;
		} else
			printk(KERN_ERR
				"Could not allocate node local PDA for CPU %d on node %d\n",
				cpu, node);
	}

	alternatives_smp_switch(1);

	c_idle.idle = get_idle_for_cpu(cpu);

	if (c_idle.idle) {
		c_idle.idle->thread.rsp = (unsigned long)(((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	/*
	 * During the cold boot process, the keventd thread is not spun up
	 * yet. When we do cpu hot-add, we create idle threads on the fly;
	 * they should not acquire any attributes from the calling context,
	 * so the clean way to create them is from keventd().
	 * We check current_is_keventd() because the ACPI notifier was also
	 * queuing to keventd(), and if the caller is already running in the
	 * context of keventd(), we would end up locking up the keventd
	 * thread.
	 */
	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk("failed fork for CPU %d\n", cpu);
		return PTR_ERR(c_idle.idle);
	}

	set_idle_for_cpu(cpu, c_idle.idle);

do_rest:

	cpu_pda(cpu)->pcurrent = c_idle.idle;

	start_rip = setup_trampoline();

	init_rsp = c_idle.idle->thread.rsp;
	per_cpu(init_tss, cpu).rsp0 = init_rsp;
	initial_code = start_secondary;
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);

	printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
		cpus_weight(cpu_present_map), apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */
	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	CMOS_WRITE(0xa, 0xf);

	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
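
	/*
	 * Background: CMOS register 0x0F = 0x0A selects "warm reset via
	 * the 40:67 pointer", so after INIT the BIOS jumps through the
	 * far pointer at physical 0x467 (offset) / 0x469 (segment), which
	 * the two stores above aim at the trampoline (start_rip is page
	 * aligned, so segment = start_rip >> 4, offset = start_rip & 0xf).
	 */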

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	/*
	 * Status is now clean.
	 */

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}

	if (boot_error) {
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
		cpu_clear(cpu, cpu_present_map);
		cpu_clear(cpu, cpu_possible_map);
		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
		return -EIO;
	}

	return 0;
}

cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

/*
 * Clean up possible dangling ends...
 */
static __cpuinit void smp_cleanup_boot(void)
{
	/*
	 * Paranoid: Set warm reset code and vector here back
	 * to default values.
	 */
	CMOS_WRITE(0, 0xf);

	/*
	 * Reset trampoline flag
	 */
	*((volatile int *) phys_to_virt(0x467)) = 0;
}

/*
 * Fall back to non-SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	cpu_present_map = cpumask_of_cpu(0);
	cpu_possible_map = cpumask_of_cpu(0);
	if (smp_found_config)
		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
	else
		phys_cpu_present_map = physid_mask_of_physid(0);
	cpu_set(0, per_cpu(cpu_sibling_map, 0));
	cpu_set(0, per_cpu(cpu_core_map, 0));
}

#ifdef CONFIG_HOTPLUG_CPU

int additional_cpus __initdata = -1;

/*
 * cpu_possible_map should be static: it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they do not expect
 * to do this dynamically on CPU arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When cpu_hotplug is not compiled in, we fall back to the current
 * behaviour, which is cpu_possible == cpu_present.
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}

	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible,
		max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
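
/*
 * Worked example: with 2 processors enumerated at boot and 2 more marked
 * disabled in ACPI/the MP table, possible = 4, so CPUs 0-3 end up in
 * cpu_possible_map and per-cpu data is sized for a later hot-add.
 */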
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
		       boot_cpu_id);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (!cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
		       boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		return -1;
	}

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		return -1;
	}

	return 0;
}

/*
 * Copy the APIC IDs found by MP_processor_info from the initial array to
 * the per-cpu data area. The x86_cpu_to_apicid_init array is then
 * expendable, and x86_cpu_to_apicid_ptr is zeroed to indicate that the
 * static array is no longer needed.
 */
void __init smp_set_apicids(void)
{
	int cpu;

	for_each_cpu_mask(cpu, cpu_possible_map) {
		if (per_cpu_offset(cpu))
			per_cpu(x86_cpu_to_apicid, cpu) =
				x86_cpu_to_apicid_init[cpu];
	}

	/* indicate the static array will be going away soon */
	x86_cpu_to_apicid_ptr = NULL;
}

/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	nmi_watchdog_default();
	current_cpu_data = boot_cpu_data;
	current_thread_info()->cpu = 0; /* needed? */
	set_cpu_sibling_map(0);

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
		disable_smp();
		return;
	}

	/*
	 * Switch from PIC to APIC mode.
	 */
	setup_local_APIC();

	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
		/* Or can we switch back to PIC here? */
	}

	/*
	 * Now start the IO-APICs
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;

	/*
	 * Set up local APIC timer on boot CPU.
	 */
	setup_boot_APIC_clock();
}

/*
 * Early setup to make printk work.
 */
void __init smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	cpu_set(me, cpu_online_map);
	cpu_set(me, cpu_callout_map);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}

/*
 * Entry point to boot a CPU.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	int apicid = cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_id ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk("__cpu_up: bad cpu %d\n", cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpu_isset(cpu, cpu_callin_map)) {
		Dprintk("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Boot it! */
	err = do_boot_cpu(cpu, apicid);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/* Unleash the CPU! */
	Dprintk("waiting for cpu %d\n", cpu);

	/*
	 * Make sure and check TSC sync:
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_isset(cpu, cpu_online_map))
		cpu_relax();

	return 0;
}

/*
 * Finish the SMP boot.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	smp_cleanup_boot();
	check_nmi_watchdog();
}

#ifdef CONFIG_HOTPLUG_CPU

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

void remove_cpu_from_maps(void)
{
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
	clear_node_cpumask(cpu);
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC -zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);

	/*
	 * Allow any queued timer interrupts to get serviced.
	 * This is only a temporary solution until we clean up
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	spin_lock(&vector_lock);
	/* It's now safe to remove this processor from the online map */
	cpu_clear(cpu, cpu_online_map);
	spin_unlock(&vector_lock);
	remove_cpu_from_maps();
	fixup_irqs(cpu_online_map);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk("CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);
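
/*
 * Example: booting with "additional_cpus=2" makes prefill_possible_map()
 * reserve two extra slots in cpu_possible_map for CPUs that may be
 * hot-added later.
 */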

#else /* ... !CONFIG_HOTPLUG_CPU */

int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */