/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *		Martin J. Bligh	:	Added support for multi-quad systems
 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 */
/* SMP boot always wants to use real time delay to allow sufficient time for
 * the APs to come online */
#define USE_REAL_TIME_DELAY
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/arch_hooks.h>
#include <asm/nmi.h>
#include <asm/pda.h>
#include <asm/genapic.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
/* Set if we find a B stepping CPU */
static int __devinitdata smp_b_stepping;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};

/* representing HT siblings of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
static cpumask_t smp_commenced_mask;
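
/*
 * Bring-up handshake, in order: the BSP sets a cpu's bit in cpu_callout_map
 * to let the AP proceed; the AP answers by setting its bit in cpu_callin_map
 * from smp_callin(); __cpu_up() then sets smp_commenced_mask to release the
 * AP from its spin loop, and the AP finally marks itself in cpu_online_map.
 */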
/*
 * The TSC's upper 32 bits can't be written on earlier CPUs (before Prescott),
 * so there is no way to resync one AP against the BP. TBD: for Prescott and
 * above, we should use IA64's algorithm.
 */
static int __devinitdata tsc_sync_disabled;
/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);

u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly =
			{ [0 ... NR_CPUS-1] = 0xff };
EXPORT_SYMBOL(x86_cpu_to_apicid);

u8 apicid_2_node[MAX_APICID];
/*
 * Trampoline 80x86 program as an array.
 */

extern unsigned char trampoline_data[];
extern unsigned char trampoline_end[];
static unsigned char *trampoline_base;
static int trampoline_exec;
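
/*
 * trampoline_data/trampoline_end delimit the real-mode startup stub
 * assembled in trampoline.S (see the header credits); setup_trampoline()
 * copies it into the low-memory page at trampoline_base before each AP
 * is kicked.
 */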
static void map_cpu_to_logical_apicid(void);

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
static unsigned long __devinit setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
	/*
	 * Make the SMP trampoline executable:
	 */
	trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
}
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */

static void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;

	*c = boot_cpu_data;
	if (id != 0)
		identify_cpu(c);
	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;
	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		if (num_possible_cpus() == 1)
			goto valid_k7;

		/* Athlon 660/661 is valid. */
		if ((c->x86_model == 6) && ((c->x86_mask == 0) || (c->x86_mask == 1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model == 7) && (c->x86_mask == 0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon > model 7 have the MP
		 * capability bit. Note that the A5 stepping (662) of some
		 * Athlon XPs has the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
		 */
		if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
		    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
		     (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, it's not a certified SMP capable AMD system. */
		add_taint(TAINT_UNSAFE_SMP);
	}

valid_k7:
	;
}
/*
 * TSC synchronization.
 *
 * We first check whether all CPUs have their TSCs synchronized,
 * then we print a warning if not, and always resync.
 */

static struct {
	atomic_t start_flag;
	atomic_t count_start;
	atomic_t count_stop;
	unsigned long long values[NR_CPUS];
} tsc __cpuinitdata = {
	.start_flag = ATOMIC_INIT(0),
	.count_start = ATOMIC_INIT(0),
	.count_stop = ATOMIC_INIT(0),
};
#define NR_LOOPS 5

static void __init synchronize_tsc_bp(void)
{
	int i;
	unsigned long long t0;
	unsigned long long sum, avg;
	long long delta;
	unsigned int one_usec;
	int buggy = 0;

	printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());

	/* convert from kcyc/sec to cyc/usec */
	one_usec = cpu_khz / 1000;

	atomic_set(&tsc.start_flag, 1);
	wmb();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronized and
	 * the BP and APs set their cycle counters to zero all at
	 * once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * the two CPUs.
	 */
	for (i = 0; i < NR_LOOPS; i++) {
		/* all APs synchronize but they loop on '== num_cpus' */
		while (atomic_read(&tsc.count_start) != num_booting_cpus()-1)
			cpu_relax();
		atomic_set(&tsc.count_stop, 0);
		wmb();
		/* this lets the APs save their current TSC: */
		atomic_inc(&tsc.count_start);

		rdtscll(tsc.values[smp_processor_id()]);
		/* We clear the TSC in the last loop: */
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		/* Wait for all APs to leave the synchronization point: */
		while (atomic_read(&tsc.count_stop) != num_booting_cpus()-1)
			cpu_relax();
		atomic_set(&tsc.count_start, 0);
		wmb();
		atomic_inc(&tsc.count_stop);
	}

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_isset(i, cpu_callout_map)) {
			t0 = tsc.values[i];
			sum += t0;
		}
	}
	avg = sum;
	do_div(avg, num_booting_cpus());

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_isset(i, cpu_callout_map))
			continue;
		delta = tsc.values[i] - avg;
		if (delta < 0)
			delta = -delta;
		/* We report bigger than 2 microseconds clock differences. */
		if (delta > 2*one_usec) {
			long long realdelta;

			if (!buggy) {
				buggy = 1;
				printk("\n");
			}
			realdelta = delta;
			do_div(realdelta, one_usec);
			if (tsc.values[i] < avg)
				realdelta = -realdelta;

			if (realdelta)
				printk(KERN_INFO "CPU#%d had %Ld usecs TSC "
					"skew, fixed it up.\n", i, realdelta);
		}
	}
	if (!buggy)
		printk("passed.\n");
}
static void __cpuinit synchronize_tsc_ap(void)
{
	int i;

	/*
	 * Not every cpu is online at the time
	 * this gets called, so we first wait for the BP to
	 * finish SMP initialization:
	 */
	while (!atomic_read(&tsc.start_flag))
		cpu_relax();

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&tsc.count_start);
		while (atomic_read(&tsc.count_start) != num_booting_cpus())
			cpu_relax();

		rdtscll(tsc.values[smp_processor_id()]);
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		atomic_inc(&tsc.count_stop);
		while (atomic_read(&tsc.count_stop) != num_booting_cpus())
			cpu_relax();
	}
}
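
/*
 * Note the pairing with synchronize_tsc_bp() above: the BP waits for
 * num_booting_cpus()-1 increments while each AP waits for the full
 * num_booting_cpus() count, so both sides pass each barrier together
 * exactly NR_LOOPS times.
 */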
#undef NR_LOOPS

extern void calibrate_delay(void);

static atomic_t init_deasserted;
static void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		printk("huh, phys CPU#%d, CPU#%d already present??\n",
					phys_id, cpuid);
		BUG();
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		rep_nop();
	}

	if (!time_before(jiffies, timeout)) {
		printk("BUG: CPU%d started up but did not get a callout!\n",
			cpuid);
		BUG();
	}
	/*
	 * The boot CPU has finished the init stage and is spinning on
	 * callin_map until we finish. We are free to set up this CPU:
	 * first the APIC. (this is probably redundant on most boards)
	 */
	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	/* Get our bogomips. */
	calibrate_delay();
	Dprintk("Stack at about %p\n", &cpuid);

	/* Save our processor parameters */
	smp_store_cpu_info(cpuid);

	disable_APIC_timer();

	/* Allow the master to continue. */
	cpu_set(cpuid, cpu_callin_map);

	/* Synchronize the TSC with the BP */
	if (cpu_has_tsc && cpu_khz && !tsc_sync_disabled)
		synchronize_tsc_ap();
}
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = cpu_data + cpu;
	/*
	 * For performance, we return the last level cache shared map;
	 * for power savings, we return cpu_core_map.
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return cpu_core_map[cpu];
	else
		return c->llc_shared_map;
}
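
/*
 * Example (hypothetical topology): on a dual-core package with a shared L2,
 * llc_shared_map and cpu_core_map coincide; with per-core caches,
 * llc_shared_map is narrower, giving the scheduler smaller balancing
 * domains unless the power-saving knobs ask for whole-package domains.
 */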
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = cpu_data;

	cpu_set(cpu, cpu_sibling_setup_map);
	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
				cpu_set(i, cpu_core_map[cpu]);
				cpu_set(cpu, cpu_core_map[i]);
				cpu_set(i, c[cpu].llc_shared_map);
				cpu_set(cpu, c[i].llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, cpu_sibling_map[cpu]);
	}

	cpu_set(cpu, c[cpu].llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		cpu_core_map[cpu] = cpu_sibling_map[cpu];
		c[cpu].booted_cores = 1;
		return;
	}
	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (cpu_llc_id[cpu] != BAD_APICID &&
		    cpu_llc_id[cpu] == cpu_llc_id[i]) {
			cpu_set(i, c[cpu].llc_shared_map);
			cpu_set(cpu, c[i].llc_shared_map);
		}
		if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
			cpu_set(i, cpu_core_map[cpu]);
			cpu_set(cpu, cpu_core_map[i]);
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(cpu_sibling_map[i]) == i)
					c[cpu].booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					c[i].booted_cores++;
			} else if (i != cpu && !c[cpu].booted_cores)
				c[cpu].booted_cores = c[i].booted_cores;
		}
	}
}
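
/*
 * Worked example (hypothetical box): two packages, each with two HT cores.
 * Once all CPUs are up, cpu_sibling_map[n] holds the two threads of n's
 * core, cpu_core_map[n] holds all four threads of n's package, and
 * booted_cores reads 2 for every thread in a package.
 */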
/*
 * Activate a secondary processor.
 */
static void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before secondary_cpu_init(); SMP booting is
	 * so fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	secondary_cpu_init();
	preempt_disable();
	smp_callin();
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		rep_nop();
	setup_secondary_clock();
	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}
	enable_APIC_timer();
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();

	/* This must be done before setting cpu_online_map */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines the number of
	 * IPI recipients and the time when the determination is made
	 * for which cpus receive the IPI. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	lock_ipi_call_lock();
	cpu_set(smp_processor_id(), cpu_online_map);
	unlock_ipi_call_lock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wake_up_process(current->parent);
	cpu_idle();
}
/*
 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure.
 * This function must not return.
 */
void __devinit initialize_secondary(void)
{
	/*
	 * switch to the per CPU GDT we already set up
	 * in do_boot_cpu()
	 */
	cpu_set_gdt(current_thread_info()->cpu);

	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the eip.
	 */
	asm volatile(
		"movl %0,%%esp\n\t"
		"jmp *%1"
		:
		:"m" (current->thread.esp), "m" (current->thread.eip));
}
/* Static state in head.S used to set up a CPU */
extern struct {
	void *esp;
	unsigned short ss;
} stack_start;
extern struct i386_pda *start_pda;

#ifdef CONFIG_NUMA
/* which logical CPUs are on which nodes */
cpumask_t node_2_cpu_mask[MAX_NUMNODES] __read_mostly =
				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(node_2_cpu_mask);
/* which node each logical CPU is on */
int cpu_2_node[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_2_node);
/* set up a mapping between cpu and node. */
static inline void map_cpu_to_node(int cpu, int node)
{
	printk("Mapping cpu %d to node %d\n", cpu, node);
	cpu_set(cpu, node_2_cpu_mask[node]);
	cpu_2_node[cpu] = node;
}
/* undo a mapping between cpu and node. */
static inline void unmap_cpu_to_node(int cpu)
{
	int node;

	printk("Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpu_clear(cpu, node_2_cpu_mask[node]);
}
#else /* !CONFIG_NUMA */

#define map_cpu_to_node(cpu, node) ({})
#define unmap_cpu_to_node(cpu) ({})

#endif /* CONFIG_NUMA */
u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };

static void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();
	int node = apicid_to_node(apicid);

	if (!node_online(node))
		node = first_online_node;

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, node);
}

static void unmap_cpu_to_logical_apicid(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}
static inline void __inquire_remote_apic(int apicid)
{
	int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	unsigned long status;

	printk("Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		/* Wait for idle, then send a remote-read of this register. */
		apic_wait_icr_idle();
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08lx\n", status);
			break;
		default:
			printk("failed\n");
		}
	}
}
#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the
 * normal INIT, INIT, STARTUP sequence will reset the chip hard for us, and
 * this won't ... remember to clear down the APIC, etc later.
 */
static int __devinit
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int timeout, maxlvt;

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

	/* Boot on the stack */
	/* Kick the second */
	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	/*
	 * Due to the Pentium erratum 3AP.
	 */
	maxlvt = get_maxlvt();
	if (maxlvt > 3) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	Dprintk("NMI sent.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_NMI */
#ifdef WAKE_SECONDARY_VIA_INIT
static int __devinit
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	/* Be paranoid about clearing APIC errors. */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	Dprintk("Asserting INIT.\n");

	/* Turn INIT on target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);
	/*
	 * Should we send STARTUP IPIs? Determine this based on the APIC
	 * version: if we don't have an integrated APIC, don't send them.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Paravirt / VMI wants a startup IPI hook here to set up the
	 * target processor state.
	 */
	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
			 (unsigned long) stack_start.esp);

	/* Run STARTUP IPI loop. */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/* STARTUP IPI */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_eip >> 12));

		/* Give the other CPU some time to accept the IPI. */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			Dprintk("+");
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/* Give the other CPU some time to accept the IPI. */
		udelay(200);
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_INIT */
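
/*
 * Note on the STARTUP vector: APIC_DM_STARTUP | (start_eip >> 12) encodes
 * the trampoline's physical page number, and the woken AP begins executing
 * in real mode at physical address (vector << 12). This is why start_eip
 * must be page-aligned and the trampoline must sit below 1MB.
 */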
extern cpumask_t cpu_initialized;
static inline int alloc_cpu_id(void)
{
	cpumask_t tmp_map;
	int cpu;

	cpus_complement(tmp_map, cpu_present_map);
	cpu = first_cpu(tmp_map);
	if (cpu >= NR_CPUS)
		return -ENODEV;
	return cpu;
}
#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct * __devinitdata cpu_idle_tasks[NR_CPUS];
static inline struct task_struct * alloc_idle_task(int cpu)
{
	struct task_struct *idle;

	if ((idle = cpu_idle_tasks[cpu]) != NULL) {
		/* initialize thread_struct; we really want to avoid
		 * destroying the idle thread. */
		idle->thread.esp = (unsigned long)task_pt_regs(idle);
		init_idle(idle, cpu);
		return idle;
	}
	idle = fork_idle(cpu);

	if (!IS_ERR(idle))
		cpu_idle_tasks[cpu] = idle;
	return idle;
}
#else
#define alloc_idle_task(cpu) fork_idle(cpu)
#endif
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if the CPU booted OK, else the error code from
 * wakeup_secondary_cpu.
 */
static int __cpuinit do_boot_cpu(int apicid, int cpu)
{
	struct task_struct *idle;
	unsigned long boot_error;
	int timeout;
	unsigned long start_eip;
	unsigned short nmi_high = 0, nmi_low = 0;
	/*
	 * We can't use kernel_thread since we must avoid
	 * rescheduling the child.
	 */
	idle = alloc_idle_task(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);

	/* Pre-allocate and initialize the CPU's GDT and PDA so it
	   doesn't have to do any memory allocation during the
	   delicate CPU-bringup phase. */
	if (!init_gdt(cpu, idle)) {
		printk(KERN_INFO "Couldn't allocate GDT/PDA for CPU %d\n", cpu);
		return -1;
	}

	idle->thread.eip = (unsigned long) start_secondary;
	/* start_eip had better be page-aligned! */
	start_eip = setup_trampoline();

	++cpucount;
	alternatives_smp_switch(1);

	/* So we see what's up */
	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
	/* Stack for startup_32 can be just as for start_secondary onwards */
	stack_start.esp = (void *) idle->thread.esp;
	irq_ctx_init(cpu);

	x86_cpu_to_apicid[cpu] = apicid;
	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */
	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	store_NMI_vector(&nmi_high, &nmi_low);
	smpboot_setup_warm_reset_vector(start_eip);

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_cpu(apicid, start_eip);

	if (!boot_error) {
		/* allow APs to start initializing. */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);
		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			printk("CPU%d: ", cpu);
			print_cpu_info(&cpu_data[cpu]);
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}
	if (boot_error) {
		/* Try to put things back the way they were before ... */
		unmap_cpu_to_logical_apicid(cpu);
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
		cpucount--;
	} else {
		x86_cpu_to_apicid[cpu] = apicid;
		cpu_set(cpu, cpu_present_map);
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	return boot_error;
}
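
/*
 * The "stuck" diagnosis works because the trampoline stub's early
 * instructions overwrite the start of trampoline_base with an 0xA5A5A5A5
 * marker: seeing 0xA5 there means the AP ran the trampoline but never
 * reached smp_callin(); anything else means the STARTUP IPI never took.
 */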
#ifdef CONFIG_HOTPLUG_CPU
void cpu_exit_clear(void)
{
	int cpu = raw_smp_processor_id();

	idle_task_exit();

	cpucount--;
	cpu_uninit();
	irq_ctx_exit(cpu);

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);

	cpu_clear(cpu, smp_commenced_mask);
	unmap_cpu_to_logical_apicid(cpu);
}
struct warm_boot_cpu_info {
	struct completion *complete;
	struct work_struct task;
	int apicid;
	int cpu;
};

static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
{
	struct warm_boot_cpu_info *info =
		container_of(work, struct warm_boot_cpu_info, task);
	do_boot_cpu(info->apicid, info->cpu);
	complete(info->complete);
}
static int __cpuinit __smp_prepare_cpu(int cpu)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct warm_boot_cpu_info info;
	int apicid, ret;
	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);

	apicid = x86_cpu_to_apicid[cpu];
	if (apicid == BAD_APICID) {
		ret = -ENODEV;
		goto exit;
	}

	/*
	 * The CPU isn't initialized at boot time, so allocate the gdt
	 * table here; cpu_init will initialize it.
	 */
	if (!cpu_gdt_descr->address) {
		cpu_gdt_descr->address = get_zeroed_page(GFP_KERNEL);
		if (!cpu_gdt_descr->address) {
			printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
			ret = -ENOMEM;
			goto exit;
		}
	}

	info.complete = &done;
	info.apicid = apicid;
	info.cpu = cpu;
	INIT_WORK(&info.task, do_warm_boot_cpu);

	tsc_sync_disabled = 1;

	/* init low mem mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
	flush_tlb_all();
	schedule_work(&info.task);
	wait_for_completion(&done);

	tsc_sync_disabled = 0;
	zap_low_mappings();
	ret = 0;
exit:
	return ret;
}
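
/*
 * The clone_pgd_range()/zap_low_mappings() bracket above temporarily
 * restores an identity mapping of low memory: the warm-booted AP starts in
 * real mode at the trampoline and needs low virtual==physical addresses to
 * survive enabling paging, after which the mapping is torn down again.
 */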
static void smp_tune_scheduling(void)
{
	unsigned long cachesize;	/* kB */

	if (cpu_khz) {
		cachesize = boot_cpu_data.x86_cache_size;

		if (cachesize > 0)
			max_cache_size = cachesize * 1024;
	}
}
/*
 * Cycle through the processors sending APIC IPIs to boot each.
 */

static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;
#ifdef CONFIG_X86_NUMAQ
EXPORT_SYMBOL(xquad_portio);
#endif
static void __init smp_boot_cpus(unsigned int max_cpus)
{
	int apicid, cpu, bit, kicked;
	unsigned long bogosum = 0;

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk("CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);

	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
	boot_cpu_logical_apicid = logical_smp_processor_id();
	x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();

	set_cpu_sibling_map(0);
	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		map_cpu_to_logical_apicid();
		cpu_set(0, cpu_sibling_map[0]);
		cpu_set(0, cpu_core_map[0]);
		return;
	}
	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 * Makes no sense to do this check in clustered apic mode, so skip it.
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
				boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}
	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
	    !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		cpu_set(0, cpu_sibling_map[0]);
		cpu_set(0, cpu_core_map[0]);
		return;
	}
	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		cpu_set(0, cpu_sibling_map[0]);
		cpu_set(0, cpu_core_map[0]);
		return;
	}
	connect_bsp_APIC();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	setup_portio_remap();
	/*
	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu.
	 *
	 * In clustered apic mode, phys_cpu_present_map is constructed thus:
	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
	 * clustered apic ID.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
	kicked = 1;
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
			continue;

		if (!check_apicid_present(bit))
			continue;
		if (max_cpus <= cpucount+1)
			continue;

		if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
			printk("CPU #%d not responding - cannot use it.\n",
								apicid);
		else
			++kicked;
	}
	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();
	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data[cpu].loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		cpucount+1,
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);
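
	/*
	 * The arithmetic: bogosum is in delay-loop iterations per jiffy, so
	 * bogosum/(500000/HZ) == bogosum*HZ/500000 yields whole BogoMIPS
	 * (one bogo loop counts as two instructions), and
	 * (bogosum*HZ/5000) mod 100 supplies the two decimal places.
	 */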
	Dprintk("Before bogocount - setting activated=1.\n");

	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");
	/*
	 * Don't taint if we are running an SMP kernel on a single non-MP
	 * approved Athlon.
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (cpucount)
			printk(KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}
	Dprintk("Boot done.\n");

	/*
	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
	 * efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpus_clear(cpu_sibling_map[cpu]);
		cpus_clear(cpu_core_map[cpu]);
	}

	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);
	smpboot_setup_io_apic();

	setup_boot_clock();

	/*
	 * Synchronize the TSC with the APs
	 */
	if (cpu_has_tsc && cpucount && cpu_khz)
		synchronize_tsc_bp();
}
/* These are wrappers to interface to the new boot process. Someone
   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	smp_commenced_mask = cpumask_of_cpu(0);
	cpu_callin_map = cpumask_of_cpu(0);
	mb();
	smp_boot_cpus(max_cpus);
}
void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callout_map);
	cpu_set(smp_processor_id(), cpu_present_map);
	cpu_set(smp_processor_id(), cpu_possible_map);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}
#ifdef CONFIG_HOTPLUG_CPU
static void
remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = cpu_data;

	for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
		cpu_clear(cpu, cpu_core_map[sibling]);
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(cpu_sibling_map[cpu]) == 1)
			c[sibling].booted_cores--;
	}

	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
		cpu_clear(cpu, cpu_sibling_map[sibling]);
	cpus_clear(cpu_sibling_map[cpu]);
	cpus_clear(cpu_core_map[cpu]);
	c[cpu].phys_proc_id = 0;
	c[cpu].cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}
int __cpu_disable(void)
{
	cpumask_t map = cpu_online_map;
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;
	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();
	/* Allow any queued timer interrupts to get serviced */
	local_irq_enable();
	mdelay(1);
	/* Shut off timer tick */
	local_irq_disable();

	remove_siblinginfo(cpu);

	cpu_clear(cpu, map);
	fixup_irqs(map);
	/* It's now safe to remove this processor from the online map */
	cpu_clear(cpu, cpu_online_map);
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: the idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk("CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
int __cpuinit __cpu_up(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	int ret = 0;

	/*
	 * We do a warm boot only on cpus that had booted earlier;
	 * otherwise cold boot is all handled from smp_boot_cpus().
	 * cpu_callin_map is set during the AP kickstart process. It is
	 * reset when a cpu is taken offline from cpu_exit_clear().
	 */
	if (!cpu_isset(cpu, cpu_callin_map))
		ret = __smp_prepare_cpu(cpu);

	if (ret)
		return -EIO;
#endif

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
		local_irq_enable();
		return -EIO;
	}

	local_irq_enable();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))
		cpu_relax();

#ifdef CONFIG_X86_GENERICARCH
	if (num_online_cpus() > 8 && genapic == &apic_default)
		panic("Default flat APIC routing can't be used with > 8 cpus\n");
#endif

	return 0;
}
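
/*
 * The > 8 CPU check exists because the default flat genapic delivers IPIs
 * in logical destination mode with one bit per CPU in an 8-bit mask, so at
 * most 8 CPUs are addressable; larger boxes must select a clustered APIC
 * driver instead.
 */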
void __init smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	zap_low_mappings();
#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Disable executability of the SMP trampoline:
	 */
	set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
#endif
}
void __init smp_intr_init(void)
{
	/*
	 * IRQ0 must be given a fixed assignment and initialized,
	 * because it's used before the IO-APIC is set up.
	 */
	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);

	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPI for invalidation */
	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

	/* IPI for generic function call */
	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
}
/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
	extern unsigned int maxcpus;

	maxcpus = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("maxcpus", parse_maxcpus);