diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index cc454731d87959a4ececb9a7355dd93befe03822..3aba47624df45aa0e773c169fe6498b5bb5e377e 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1,6 +1,6 @@
 /* smp.c: Sparc64 SMP support.
  *
- * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/module.h>
@@ -20,7 +20,7 @@
 #include <linux/cache.h>
 #include <linux/jiffies.h>
 #include <linux/profile.h>
-#include <linux/bootmem.h>
+#include <linux/lmb.h>
 
 #include <asm/head.h>
 #include <asm/ptrace.h>
@@ -30,6 +30,7 @@
 #include <asm/cpudata.h>
 #include <asm/hvtramp.h>
 #include <asm/io.h>
+#include <asm/timer.h>
 
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
@@ -37,7 +38,6 @@
 #include <asm/pgtable.h>
 #include <asm/oplib.h>
 #include <asm/uaccess.h>
-#include <asm/timer.h>
 #include <asm/starfire.h>
 #include <asm/tlb.h>
 #include <asm/sections.h>
@@ -284,14 +284,17 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
 {
        extern unsigned long sparc64_ttable_tl0;
        extern unsigned long kern_locked_tte_data;
-       extern int bigkernel;
        struct hvtramp_descr *hdesc;
        unsigned long trampoline_ra;
        struct trap_per_cpu *tb;
        u64 tte_vaddr, tte_data;
        unsigned long hv_err;
+       int i;
 
-       hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
+       hdesc = kzalloc(sizeof(*hdesc) +
+                       (sizeof(struct hvtramp_mapping) *
+                        num_kernel_image_mappings - 1),
+                       GFP_KERNEL);
        if (!hdesc) {
                printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
                       "hvtramp_descr.\n");
@@ -299,7 +302,7 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
        }
 
        hdesc->cpu = cpu;
-       hdesc->num_mappings = (bigkernel ? 2 : 1);
+       hdesc->num_mappings = num_kernel_image_mappings;
 
        tb = &trap_block[cpu];
        tb->hdesc = hdesc;
@@ -312,13 +315,11 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
        tte_vaddr = (unsigned long) KERNBASE;
        tte_data = kern_locked_tte_data;
 
-       hdesc->maps[0].vaddr = tte_vaddr;
-       hdesc->maps[0].tte   = tte_data;
-       if (bigkernel) {
+       for (i = 0; i < hdesc->num_mappings; i++) {
+               hdesc->maps[i].vaddr = tte_vaddr;
+               hdesc->maps[i].tte   = tte_data;
                tte_vaddr += 0x400000;
                tte_data  += 0x400000;
-               hdesc->maps[1].vaddr = tte_vaddr;
-               hdesc->maps[1].tte   = tte_data;
        }
 
        trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
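
The hunks above size the hvtramp_descr allocation for a variable number of kernel-image mappings and fill them in a loop, one locked 4 MB TTE per step, replacing the old hard-coded one-or-two-mapping (bigkernel) scheme. Below is a minimal userspace sketch of the same sizing-and-fill pattern; the structure fields, the num_kernel_image_mappings value, and the addresses are simplified stand-ins, not the kernel's real definitions.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's hvtramp structures (sketch only). */
struct hvtramp_mapping {
	unsigned long vaddr;
	unsigned long tte;
};

struct hvtramp_descr {
	unsigned int cpu;
	unsigned int num_mappings;
	struct hvtramp_mapping maps[1];	/* trailing array, real length num_mappings */
};

int main(void)
{
	unsigned int num_kernel_image_mappings = 4;	/* assumed value for the demo */
	unsigned long tte_vaddr = 0x400000UL;		/* stand-in for KERNBASE */
	unsigned long tte_data  = 0x80000000UL;		/* stand-in for kern_locked_tte_data */
	struct hvtramp_descr *hdesc;
	unsigned int i;

	/* Size the allocation for the descriptor plus the trailing maps[] array;
	 * "- 1" accounts for the one element already declared in the struct. */
	hdesc = calloc(1, sizeof(*hdesc) +
			  sizeof(struct hvtramp_mapping) *
			  (num_kernel_image_mappings - 1));
	if (!hdesc)
		return 1;

	hdesc->num_mappings = num_kernel_image_mappings;

	/* One locked 4 MB mapping per step of the kernel image. */
	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte   = tte_data;
		tte_vaddr += 0x400000;
		tte_data  += 0x400000;
	}

	for (i = 0; i < hdesc->num_mappings; i++)
		printf("map %u: vaddr=%#lx tte=%#lx\n",
		       i, hdesc->maps[i].vaddr, hdesc->maps[i].tte);

	free(hdesc);
	return 0;
}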
@@ -864,14 +865,21 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
        void *info = call_data->info;
 
        clear_softint(1 << irq);
+
+       irq_enter();
+
+       if (!call_data->wait) {
+               /* let initiator proceed after getting data */
+               atomic_inc(&call_data->finished);
+       }
+
+       func(info);
+
+       irq_exit();
+
        if (call_data->wait) {
                /* let initiator proceed only after completion */
-               func(info);
-               atomic_inc(&call_data->finished);
-       } else {
-               /* let initiator proceed after getting data */
                atomic_inc(&call_data->finished);
-               func(info);
        }
 }
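
This hunk brackets the cross-call handler with irq_enter()/irq_exit() and reorders the acknowledgement: when the initiator is not waiting for completion, finished is bumped before the callback runs so the caller can continue as soon as delivery is confirmed; when it is waiting, the bump happens only after the callback returns. A small userspace sketch of that ordering, using stand-in types and C11 atomics rather than the kernel's primitives:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the kernel's cross-call bookkeeping (sketch only). */
struct call_data_struct {
	void (*func)(void *info);
	void *info;
	atomic_int finished;
	int wait;
};

/* Receiver-side ordering from the hunk above: acknowledge receipt up front
 * when the initiator is not waiting for completion; acknowledge only after
 * the callback has finished when it is. */
static void call_function_client(struct call_data_struct *cd)
{
	if (!cd->wait)
		atomic_fetch_add(&cd->finished, 1);	/* initiator only needs delivery */

	cd->func(cd->info);

	if (cd->wait)
		atomic_fetch_add(&cd->finished, 1);	/* initiator needs completion */
}

static void hello(void *info)
{
	printf("callback ran with info=\"%s\"\n", (const char *)info);
}

int main(void)
{
	struct call_data_struct cd = { .func = hello, .info = "demo", .wait = 1 };

	call_function_client(&cd);
	printf("finished = %d\n", atomic_load(&cd.finished));
	return 0;
}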
 
@@ -901,6 +909,9 @@ extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_report_regs;
 extern unsigned long xcall_receive_signal;
 extern unsigned long xcall_new_mmu_context_version;
+#ifdef CONFIG_KGDB
+extern unsigned long xcall_kgdb_capture;
+#endif
 
 #ifdef DCACHE_ALIASING_POSSIBLE
 extern unsigned long xcall_flush_dcache_page_cheetah;
@@ -1030,7 +1041,9 @@ void smp_receive_signal(int cpu)
 
 void smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
+       irq_enter();
        clear_softint(1 << irq);
+       irq_exit();
 }
 
 void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
@@ -1038,6 +1051,8 @@ void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
        struct mm_struct *mm;
        unsigned long flags;
 
+       irq_enter();
+
        clear_softint(1 << irq);
 
        /* See if we need to allocate a new TLB context because
@@ -1057,6 +1072,8 @@ void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
        load_secondary_context(mm);
        __flush_tlb_mm(CTX_HWBITS(mm->context),
                       SECONDARY_CONTEXT);
+
+       irq_exit();
 }
 
 void smp_new_mmu_context_version(void)
@@ -1064,6 +1081,13 @@ void smp_new_mmu_context_version(void)
        smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
 }
 
+#ifdef CONFIG_KGDB
+void kgdb_roundup_cpus(unsigned long flags)
+{
+       smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
+}
+#endif
+
 void smp_report_regs(void)
 {
        smp_cross_call(&xcall_report_regs, 0, 0, 0);
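
kgdb_roundup_cpus() is the hook the KGDB core uses to pull the other CPUs into the debugger; here it is implemented by broadcasting the new xcall_kgdb_capture cross-call. Below is a toy userspace model of the round-up idea only, with one thread playing the CPU that hit the breakpoint and the rest parking until released; the thread and flag names are made up and this is not how the kernel or KGDB actually implements it.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_OTHERS 3

static atomic_int capture_request;	/* "debugger entered" flag */
static atomic_int captured;		/* how many others have parked */

static void *other_cpu(void *arg)
{
	(void)arg;

	/* Wait for the round-up request (the cross-call in the real code). */
	while (!atomic_load(&capture_request))
		;
	atomic_fetch_add(&captured, 1);	/* report in, then sit in the "debugger" */
	while (atomic_load(&capture_request))
		;
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_OTHERS];
	int i;

	for (i = 0; i < NR_OTHERS; i++)
		pthread_create(&tid[i], NULL, other_cpu, NULL);

	/* This thread hit the breakpoint: round everyone up... */
	atomic_store(&capture_request, 1);
	while (atomic_load(&captured) < NR_OTHERS)
		;
	printf("%d other CPUs captured\n", NR_OTHERS);

	/* ...and release them when the debugger session ends. */
	atomic_store(&capture_request, 0);
	for (i = 0; i < NR_OTHERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}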
@@ -1215,6 +1239,8 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
 
+       irq_enter();
+
        preempt_disable();
 
        __asm__ __volatile__("flushw");
@@ -1227,6 +1253,8 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
        prom_world(0);
 
        preempt_enable();
+
+       irq_exit();
 }
 
 /* /proc/profile writes can call this, don't __init it please. */
@@ -1429,7 +1457,7 @@ EXPORT_SYMBOL(__per_cpu_shift);
 
 void __init real_setup_per_cpu_areas(void)
 {
-       unsigned long goal, size, i;
+       unsigned long paddr, goal, size, i;
        char *ptr;
 
        /* Copy section for each CPU (we discard the original) */
@@ -1439,8 +1467,13 @@ void __init real_setup_per_cpu_areas(void)
        for (size = PAGE_SIZE; size < goal; size <<= 1UL)
                __per_cpu_shift++;
 
-       ptr = alloc_bootmem_pages(size * NR_CPUS);
+       paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
+       if (!paddr) {
+               prom_printf("Cannot allocate per-cpu memory.\n");
+               prom_halt();
+       }
 
+       ptr = __va(paddr);
        __per_cpu_base = ptr - __per_cpu_start;
 
        for (i = 0; i < NR_CPUS; i++, ptr += size)
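
Unlike alloc_bootmem_pages(), which returned a kernel virtual address directly, lmb_alloc() hands back a physical address, which is why this hunk introduces paddr, checks it, and converts it with __va() before computing __per_cpu_base. A sketch of that physical-to-virtual bookkeeping with made-up helper names and constants (fake_lmb_alloc, fake_va), not the real LMB or __va() interfaces:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE   8192ULL		/* illustrative page size */
#define DEMO_PAGE_OFFSET 0xfffff80000000000ULL	/* made-up linear-map base */

static uint64_t fake_lmb_alloc(uint64_t size, uint64_t align)
{
	(void)size;
	(void)align;
	return 0x40000000ULL;	/* pretend this physical range was reserved */
}

static uint64_t fake_va(uint64_t paddr)
{
	return paddr + DEMO_PAGE_OFFSET;	/* fixed phys-to-virt offset, like a linear map */
}

int main(void)
{
	uint64_t size = DEMO_PAGE_SIZE * 4;	/* stands in for size * NR_CPUS */
	uint64_t paddr, vaddr;

	paddr = fake_lmb_alloc(size, DEMO_PAGE_SIZE);
	if (!paddr) {
		fprintf(stderr, "Cannot allocate per-cpu memory.\n");
		return 1;
	}

	vaddr = fake_va(paddr);	/* the virtual address the rest of the code uses */
	printf("phys %#llx -> virt %#llx\n",
	       (unsigned long long)paddr, (unsigned long long)vaddr);
	return 0;
}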