diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 67edfa7ed93a643d4a59af04d9a6c98a8d1ca12f..73b0dab026687751266bd16bc15a276037d9d5b8 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -30,6 +30,7 @@
 #include <linux/sched.h>
 #include <linux/cpumask.h>
 #include <linux/cpu.h>
+#include <linux/err.h>
 
 #include <asm/atomic.h>
 #include <asm/cpu.h>
@@ -51,18 +52,8 @@ int __cpu_logical_map[NR_CPUS];              /* Map logical to physical */
 EXPORT_SYMBOL(phys_cpu_present_map);
 EXPORT_SYMBOL(cpu_online_map);
 
-/* This happens early in bootup, can't really do it better */
-static void smp_tune_scheduling (void)
-{
-       struct cache_desc *cd = &current_cpu_data.scache;
-       unsigned long cachesize = cd->linesz * cd->sets * cd->ways;
-
-       if (cachesize > max_cache_size)
-               max_cache_size = cachesize;
-}
-
 extern void __init calibrate_delay(void);
-extern ATTRIB_NORET void cpu_idle(void);
+extern void cpu_idle(void);
 
 /*
  * First C code run on the secondary CPUs after being started up by
@@ -203,6 +194,61 @@ void smp_call_function_interrupt(void)
        }
 }
 
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+                            int retry, int wait)
+{
+       struct call_data_struct data;
+       int me;
+
+       /*
+        * Can die spectacularly if this CPU isn't yet marked online
+        */
+       if (!cpu_online(cpu))
+               return 0;
+
+       me = get_cpu();
+       BUG_ON(!cpu_online(me));
+
+       if (cpu == me) {
+               local_irq_disable();
+               func(info);
+               local_irq_enable();
+               put_cpu();
+               return 0;
+       }
+
+       /* Can deadlock when called with interrupts disabled */
+       WARN_ON(irqs_disabled());
+
+       data.func = func;
+       data.info = info;
+       atomic_set(&data.started, 0);
+       data.wait = wait;
+       if (wait)
+               atomic_set(&data.finished, 0);
+
+       spin_lock(&smp_call_lock);
+       call_data = &data;
+       smp_mb();
+
+       /* Send a message to the other CPU */
+       core_send_ipi(cpu, SMP_CALL_FUNCTION);
+
+       /* Wait for response */
+       /* FIXME: lock-up detection, backtrace on lock-up */
+       while (atomic_read(&data.started) != 1)
+               barrier();
+
+       if (wait)
+               while (atomic_read(&data.finished) != 1)
+                       barrier();
+       call_data = NULL;
+       spin_unlock(&smp_call_lock);
+
+       put_cpu();
+       return 0;
+}
+
 static void stop_this_cpu(void *dummy)
 {
        /*
@@ -228,7 +274,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
-       smp_tune_scheduling();
        plat_prepare_cpus(max_cpus);
 #ifndef CONFIG_HOTPLUG_CPU
        cpu_present_map = cpu_possible_map;
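
For reference, a minimal caller sketch (not part of the patch) of the new smp_call_function_single() as defined in the hunk above: run a handler on one online CPU and optionally wait for it to finish. The handler, string and function names below are made up for illustration; the retry argument is accepted but ignored by this implementation.

	#include <linux/kernel.h>
	#include <linux/smp.h>

	/* Hypothetical handler: runs on the target CPU, either inline with
	 * interrupts disabled (cpu == me case) or from the IPI path. */
	static void say_hello(void *info)
	{
		printk(KERN_INFO "hello from CPU %d (%s)\n",
		       smp_processor_id(), (char *)info);
	}

	static void example_usage(void)
	{
		/*
		 * cpu = 1, retry = 0 (ignored), wait = 1: send an IPI to CPU 1,
		 * spin until it picks up the call, then until say_hello()
		 * returns.  Must not be called with interrupts disabled, per
		 * the WARN_ON() in smp_call_function_single().
		 */
		smp_call_function_single(1, say_hello, "example", 0, 1);
	}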