From 6becedbb06072c5741d4057b9facecb4b3143711 Mon Sep 17 00:00:00 2001
From: Glauber de Oliveira Costa
Date: Wed, 19 Mar 2008 14:25:51 -0300
Subject: [PATCH] x86: minor adjustments for do_boot_cpu

This patch provides minor adjustments for do_boot_cpus in both
architectures to allow for integration

Signed-off-by: Glauber Costa
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/smpboot_32.c | 22 ++++++++++++++--------
 arch/x86/kernel/smpboot_64.c | 15 ++++++---------
 2 files changed, 20 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index bd2f8863ef..5165b11d8a 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -556,7 +556,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
  * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
  */
 {
-	unsigned long boot_error;
+	unsigned long boot_error = 0;
 	int timeout;
 	unsigned long start_eip;
 	unsigned short nmi_high = 0, nmi_low = 0;
@@ -566,11 +566,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 	};
 	INIT_WORK(&c_idle.work, do_fork_idle);
 
-	/*
-	 * Save current MTRR state in case it was changed since early boot
-	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
-	 */
-	mtrr_save_state();
+	alternatives_smp_switch(1);
 
 	c_idle.idle = get_idle_for_cpu(cpu);
 
@@ -607,8 +603,6 @@ do_rest:
 	/* start_eip had better be page-aligned! */
 	start_eip = setup_trampoline();
 
-	alternatives_smp_switch(1);
-
 	/* So we see what's up */
 	printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip);
 	/* Stack for startup_32 can be just as for start_secondary onwards */
@@ -628,6 +622,12 @@ do_rest:
 	store_NMI_vector(&nmi_high, &nmi_low);
 	smpboot_setup_warm_reset_vector(start_eip);
 
+	/*
+	 * Be paranoid about clearing APIC errors.
+	 */
+	apic_write(APIC_ESR, 0);
+	apic_read(APIC_ESR);
+
 	/*
 	 * Starting actual IPI sequence...
 	 */
@@ -864,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 		return -EINVAL;
 	}
 
+	/*
+	 * Save current MTRR state in case it was changed since early boot
+	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
+	 */
+	mtrr_save_state();
+
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
 	__smp_prepare_cpu(cpu);
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index e93fff42ec..7d1b4cb380 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -432,7 +432,7 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
  */
 static int __cpuinit do_boot_cpu(int cpu, int apicid)
 {
-	unsigned long boot_error;
+	unsigned long boot_error = 0;
 	int timeout;
 	unsigned long start_rip;
 	struct create_idle c_idle = {
@@ -530,11 +530,6 @@ do_rest:
 	apic_write(APIC_ESR, 0);
 	apic_read(APIC_ESR);
 
-	/*
-	 * Status is now clean
-	 */
-	boot_error = 0;
-
 	/*
 	 * Starting actual IPI sequence...
 	 */
@@ -564,7 +559,7 @@ do_rest:
 		print_cpu_info(&cpu_data(cpu));
 	} else {
 		boot_error = 1;
-		if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
+		if (*((volatile unsigned char *)trampoline_base)
 				== 0xA5)
 			/* trampoline started but...? */
 			printk("Stuck ??\n");
@@ -583,10 +578,12 @@ do_rest:
 		cpu_clear(cpu, cpu_present_map);
 		cpu_clear(cpu, cpu_possible_map);
 		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
-		return -EIO;
 	}
 
-	return 0;
+	/* mark "stuck" area as not stuck */
+	*((volatile unsigned long *)trampoline_base) = 0;
+
+	return boot_error;
 }
 
 cycles_t cacheflush_time;
-- 
2.39.5