X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=arch%2Fx86_64%2Fkernel%2Fsmpboot.c;h=fa25e39fe54dfc4aab4390c95b45039da0202558;hb=97c169a21bfb5bb2ab2bccd852da4f0d0e021c55;hp=db27e1844ed24aeec4eaf4f3c8985a0500236ac7;hpb=61b1b2d0239da82c0bed8adaa1d070c6551d4afd;p=linux-2.6

diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index db27e1844e..fa25e39fe5 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -211,9 +211,6 @@ static __cpuinit void sync_master(void *arg)
 {
 	unsigned long flags, i;
 
-	if (smp_processor_id() != 0)
-		return;
-
 	go[MASTER] = 0;
 
 	local_irq_save(flags);
@@ -262,7 +259,7 @@ get_delta(long *rt, long *master)
 	return tcenter - best_tm;
 }
 
-static __cpuinit void sync_tsc(void)
+static __cpuinit void sync_tsc(unsigned int master)
 {
 	int i, done = 0;
 	long delta, adj, adjust_latency = 0;
@@ -276,9 +273,17 @@ static __cpuinit void sync_tsc(void)
 	} t[NUM_ROUNDS] __cpuinitdata;
 #endif
 
+	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
+		smp_processor_id(), master);
+
 	go[MASTER] = 1;
 
-	smp_call_function(sync_master, NULL, 1, 0);
+	/* It is dangerous to broadcast an IPI while CPUs are coming up,
+	 * as they may not be ready to accept it.  Since we only need to
+	 * reach the master CPU, direct the message at that one CPU and
+	 * avoid the race.
+	 */
+	smp_call_function_single(master, sync_master, NULL, 1, 0);
 
 	while (go[MASTER])	/* wait for master to be ready */
 		no_cpu_relax();
@@ -322,16 +327,14 @@ static __cpuinit void sync_tsc(void)
 	printk(KERN_INFO
 	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
 	       "maxerr %lu cycles)\n",
-	       smp_processor_id(), boot_cpu_id, delta, rt);
+	       smp_processor_id(), master, delta, rt);
 }
 
 static void __cpuinit tsc_sync_wait(void)
 {
 	if (notscsync || !cpu_has_tsc)
 		return;
-	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
-	       boot_cpu_id);
-	sync_tsc();
+	sync_tsc(0);
 }
 
 static __init int notscsync_setup(char *s)
@@ -489,6 +492,14 @@ void __cpuinit start_secondary(void)
 	 */
 	set_cpu_sibling_map(smp_processor_id());
 
+	/*
+	 * Wait for the TSC sync here so that nothing is scheduled before
+	 * the TSC is consistent.  We still process interrupts, which could
+	 * see an inconsistent time in that window, unfortunately.
+	 * Do this here because the TSC sync uses global, unprotected state.
+	 */
+	tsc_sync_wait();
+
 	/*
 	 * We need to hold call_lock, so there is no inconsistency
 	 * between the time smp_call_function() determines number of
@@ -506,13 +517,6 @@ void __cpuinit start_secondary(void)
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	unlock_ipi_call_lock();
 
-	mb();
-
-	/* Wait for TSC sync to not schedule things before.
-	   We still process interrupts, which could see an inconsistent
-	   time in that window unfortunately. */
-	tsc_sync_wait();
-
 	cpu_idle();
 }
 
@@ -755,8 +759,9 @@ do_rest:
 	initial_code = start_secondary;
 	clear_ti_thread_flag(c_idle.idle->thread_info, TIF_FORK);
 
-	printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid,
-	       start_rip, init_rsp);
+	printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
+		cpus_weight(cpu_present_map),
+		apicid);
 
 	/*
 	 * This grunge runs the startup process for
@@ -1182,8 +1187,7 @@ void __cpu_die(unsigned int cpu)
 			printk ("CPU %d is now offline\n", cpu);
 			return;
 		}
-		current->state = TASK_UNINTERRUPTIBLE;
-		schedule_timeout(HZ/10);
+		msleep(100);
 	}
 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
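
For readers following the go[MASTER]/go[SLAVE] handshake that sync_master() and sync_tsc() spin on, the user-space sketch below mirrors the rendezvous in a simplified form. It is only an illustration under stated assumptions: pthreads plus C11 atomics stand in for the IPI and the cache-coherent go[] flags, a hypothetical now() helper replaces rdtscll(), NUM_ROUNDS is arbitrary, and none of the delta/adjustment arithmetic from get_delta() is reproduced. What it shows is the ordering the patch relies on: the slave raises go[MASTER], pokes exactly one master instead of broadcasting, waits for the master to acknowledge, and then the two sides trade one timestamp per round through go[SLAVE].

/*
 * Minimal user-space analogue of the sync_master()/sync_tsc() rendezvous
 * (illustrative only, not kernel code): a "slave" thread asks exactly one
 * "master" thread for timestamps, and the two spin on shared flags in
 * lock-step, one timestamp per round.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define MASTER		0
#define SLAVE		1
#define NUM_ROUNDS	8

static atomic_long go[SLAVE + 1];

/* Stand-in for rdtscll(): a monotonic timestamp in nanoseconds. */
static long now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000L + ts.tv_nsec;
}

/* Master side, loosely mirroring sync_master(). */
static void *master_fn(void *arg)
{
	int i;

	/* The slave raised go[MASTER] before "poking" us. */
	while (atomic_load(&go[MASTER]) == 0)
		;			/* spin */
	atomic_store(&go[MASTER], 0);	/* tell the slave we are ready */

	for (i = 0; i < NUM_ROUNDS; i++) {
		while (atomic_load(&go[MASTER]) == 0)
			;		/* wait for this round's request */
		atomic_store(&go[MASTER], 0);
		atomic_store(&go[SLAVE], now());	/* publish a timestamp */
	}
	return NULL;
}

/* Slave side, loosely mirroring the handshake in sync_tsc()/get_delta(). */
static void *slave_fn(void *arg)
{
	long t0, t1, tm;
	int i;

	atomic_store(&go[MASTER], 1);		/* request sync from one master */
	while (atomic_load(&go[MASTER]) != 0)
		;				/* wait for master to be ready */

	for (i = 0; i < NUM_ROUNDS; i++) {
		t0 = now();
		atomic_store(&go[MASTER], 1);	/* ask for a master timestamp */
		while ((tm = atomic_load(&go[SLAVE])) == 0)
			;			/* master writes go[SLAVE] */
		atomic_store(&go[SLAVE], 0);
		t1 = now();
		printf("round %d: master stamp %ld seen inside [%ld, %ld]\n",
		       i, tm, t0, t1);
	}
	return NULL;
}

int main(void)
{
	pthread_t m, s;

	pthread_create(&m, NULL, master_fn, NULL);
	pthread_create(&s, NULL, slave_fn, NULL);
	pthread_join(s, NULL);
	pthread_join(m, NULL);
	return 0;
}

Targeting a single master in the sketch corresponds to the switch from smp_call_function() to smp_call_function_single() in the hunk above: only the one CPU that is known to be up and ready ever receives the request, which is what avoids the race with CPUs still coming online.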