1 /* smp.c: Sparc64 SMP support.
2  *
3  * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
4  */
5
6 #include <linux/module.h>
7 #include <linux/kernel.h>
8 #include <linux/sched.h>
9 #include <linux/mm.h>
10 #include <linux/pagemap.h>
11 #include <linux/threads.h>
12 #include <linux/smp.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/delay.h>
16 #include <linux/init.h>
17 #include <linux/spinlock.h>
18 #include <linux/fs.h>
19 #include <linux/seq_file.h>
20 #include <linux/cache.h>
21 #include <linux/jiffies.h>
22 #include <linux/profile.h>
23 #include <linux/lmb.h>
24
25 #include <asm/head.h>
26 #include <asm/ptrace.h>
27 #include <asm/atomic.h>
28 #include <asm/tlbflush.h>
29 #include <asm/mmu_context.h>
30 #include <asm/cpudata.h>
31 #include <asm/hvtramp.h>
32 #include <asm/io.h>
33 #include <asm/timer.h>
34
35 #include <asm/irq.h>
36 #include <asm/irq_regs.h>
37 #include <asm/page.h>
38 #include <asm/pgtable.h>
39 #include <asm/oplib.h>
40 #include <asm/uaccess.h>
42 #include <asm/starfire.h>
43 #include <asm/tlb.h>
44 #include <asm/sections.h>
45 #include <asm/prom.h>
46 #include <asm/mdesc.h>
47 #include <asm/ldc.h>
48 #include <asm/hypervisor.h>
49
50 int sparc64_multi_core __read_mostly;
51
52 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
53 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
54 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
55 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
56         { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
57
58 EXPORT_SYMBOL(cpu_possible_map);
59 EXPORT_SYMBOL(cpu_online_map);
60 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
61 EXPORT_SYMBOL(cpu_core_map);
62
63 static cpumask_t smp_commenced_mask;
64
65 void smp_info(struct seq_file *m)
66 {
67         int i;
68
69         seq_printf(m, "State:\n");
70         for_each_online_cpu(i)
71                 seq_printf(m, "CPU%d:\t\tonline\n", i);
72 }
73
74 void smp_bogo(struct seq_file *m)
75 {
76         int i;
77
78         for_each_online_cpu(i)
79                 seq_printf(m,
80                            "Cpu%dClkTck\t: %016lx\n",
81                            i, cpu_data(i).clock_tick);
82 }
83
84 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
85
86 extern void setup_sparc64_timer(void);
87
88 static volatile unsigned long callin_flag = 0;
89
90 void __cpuinit smp_callin(void)
91 {
92         int cpuid = hard_smp_processor_id();
93
94         __local_per_cpu_offset = __per_cpu_offset(cpuid);
95
96         if (tlb_type == hypervisor)
97                 sun4v_ktsb_register();
98
99         __flush_tlb_all();
100
101         setup_sparc64_timer();
102
103         if (cheetah_pcache_forced_on)
104                 cheetah_enable_pcache();
105
106         local_irq_enable();
107
108         callin_flag = 1;
109         __asm__ __volatile__("membar #Sync\n\t"
110                              "flush  %%g6" : : : "memory");
111
112         /* Clear this or we will die instantly when we
113          * schedule back to this idler...
114          */
115         current_thread_info()->new_child = 0;
116
117         /* Attach to the address space of init_task. */
118         atomic_inc(&init_mm.mm_count);
119         current->active_mm = &init_mm;
120
121         while (!cpu_isset(cpuid, smp_commenced_mask))
122                 rmb();
123
124         spin_lock(&call_lock);
125         cpu_set(cpuid, cpu_online_map);
126         spin_unlock(&call_lock);
127
128         /* idle thread is expected to have preempt disabled */
129         preempt_disable();
130 }
131
132 void cpu_panic(void)
133 {
134         printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
135         panic("SMP bolixed\n");
136 }
137
138 /* This tick register synchronization scheme is taken entirely from
139  * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
140  *
141  * The only change I've made is to rework it so that the master
142  * initiates the synchronization instead of the slave. -DaveM
143  */
144
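/* The handshake below uses two flag words: go[MASTER], which the slave
 * raises to request a sample and the master clears to acknowledge it,
 * and go[SLAVE], which the master fills with its current tick value.
 * SLAVE sits one cache line's worth of longs past MASTER so that
 * (assuming go[] ends up cache-line aligned) the two flags do not share
 * a line.  Roughly, one round of get_delta() goes:
 *
 *	slave:	t0 = tick; go[MASTER] = 1; spin until go[SLAVE] != 0
 *	master:	spin until go[MASTER];     go[MASTER] = 0;
 *		go[SLAVE] = tick;
 *	slave:	tm = go[SLAVE]; go[SLAVE] = 0; t1 = tick;
 *
 * and the slave's offset estimate is the midpoint of [t0, t1] minus tm.
 */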
145 #define MASTER  0
146 #define SLAVE   (SMP_CACHE_BYTES/sizeof(unsigned long))
147
148 #define NUM_ROUNDS      64      /* magic value */
149 #define NUM_ITERS       5       /* likewise */
150
151 static DEFINE_SPINLOCK(itc_sync_lock);
152 static unsigned long go[SLAVE + 1];
153
154 #define DEBUG_TICK_SYNC 0
155
156 static inline long get_delta (long *rt, long *master)
157 {
158         unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
159         unsigned long tcenter, t0, t1, tm;
160         unsigned long i;
161
162         for (i = 0; i < NUM_ITERS; i++) {
163                 t0 = tick_ops->get_tick();
164                 go[MASTER] = 1;
165                 membar_storeload();
166                 while (!(tm = go[SLAVE]))
167                         rmb();
168                 go[SLAVE] = 0;
169                 wmb();
170                 t1 = tick_ops->get_tick();
171
172                 if (t1 - t0 < best_t1 - best_t0)
173                         best_t0 = t0, best_t1 = t1, best_tm = tm;
174         }
175
176         *rt = best_t1 - best_t0;
177         *master = best_tm - best_t0;
178
179         /* average best_t0 and best_t1 without overflow: */
180         tcenter = (best_t0/2 + best_t1/2);
181         if (best_t0 % 2 + best_t1 % 2 == 2)
182                 tcenter++;
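        /* e.g. best_t0 = 3 and best_t1 = 5: 3/2 + 5/2 = 3, both halves had a
         * remainder, so tcenter becomes 4 == (3 + 5)/2 without ever forming
         * the possibly-overflowing sum best_t0 + best_t1.
         */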
183         return tcenter - best_tm;
184 }
185
186 void smp_synchronize_tick_client(void)
187 {
188         long i, delta, adj, adjust_latency = 0, done = 0;
189         unsigned long flags, rt, master_time_stamp, bound;
190 #if DEBUG_TICK_SYNC
191         struct {
192                 long rt;        /* roundtrip time */
193                 long master;    /* master's timestamp */
194                 long diff;      /* difference between midpoint and master's timestamp */
195                 long lat;       /* estimate of itc adjustment latency */
196         } t[NUM_ROUNDS];
197 #endif
198
199         go[MASTER] = 1;
200
201         while (go[MASTER])
202                 rmb();
203
204         local_irq_save(flags);
205         {
206                 for (i = 0; i < NUM_ROUNDS; i++) {
207                         delta = get_delta(&rt, &master_time_stamp);
208                         if (delta == 0) {
209                                 done = 1;       /* let's lock on to this... */
210                                 bound = rt;
211                         }
212
213                         if (!done) {
214                                 if (i > 0) {
215                                         adjust_latency += -delta;
216                                         adj = -delta + adjust_latency/4;
217                                 } else
218                                         adj = -delta;
219
220                                 tick_ops->add_tick(adj);
221                         }
222 #if DEBUG_TICK_SYNC
223                         t[i].rt = rt;
224                         t[i].master = master_time_stamp;
225                         t[i].diff = delta;
226                         t[i].lat = adjust_latency/4;
227 #endif
228                 }
229         }
230         local_irq_restore(flags);
231
232 #if DEBUG_TICK_SYNC
233         for (i = 0; i < NUM_ROUNDS; i++)
234                 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
235                        t[i].rt, t[i].master, t[i].diff, t[i].lat);
236 #endif
237
238         printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
239                "(last diff %ld cycles, maxerr %lu cycles)\n",
240                smp_processor_id(), delta, rt);
241 }
242
243 static void smp_start_sync_tick_client(int cpu);
244
245 static void smp_synchronize_one_tick(int cpu)
246 {
247         unsigned long flags, i;
248
249         go[MASTER] = 0;
250
251         smp_start_sync_tick_client(cpu);
252
253         /* wait for client to be ready */
254         while (!go[MASTER])
255                 rmb();
256
257         /* now let the client proceed into its loop */
258         go[MASTER] = 0;
259         membar_storeload();
260
261         spin_lock_irqsave(&itc_sync_lock, flags);
262         {
263                 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
264                         while (!go[MASTER])
265                                 rmb();
266                         go[MASTER] = 0;
267                         wmb();
268                         go[SLAVE] = tick_ops->get_tick();
269                         membar_storeload();
270                 }
271         }
272         spin_unlock_irqrestore(&itc_sync_lock, flags);
273 }
274
275 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
276 /* XXX Put this in some common place. XXX */
277 static unsigned long kimage_addr_to_ra(void *p)
278 {
279         unsigned long val = (unsigned long) p;
280
281         return kern_base + (val - KERNBASE);
282 }
283
284 static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
285 {
286         extern unsigned long sparc64_ttable_tl0;
287         extern unsigned long kern_locked_tte_data;
288         struct hvtramp_descr *hdesc;
289         unsigned long trampoline_ra;
290         struct trap_per_cpu *tb;
291         u64 tte_vaddr, tte_data;
292         unsigned long hv_err;
293         int i;
294
295         hdesc = kzalloc(sizeof(*hdesc) +
296                         (sizeof(struct hvtramp_mapping) *
297                          num_kernel_image_mappings - 1),
298                         GFP_KERNEL);
299         if (!hdesc) {
300                 printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
301                        "hvtramp_descr.\n");
302                 return;
303         }
304
305         hdesc->cpu = cpu;
306         hdesc->num_mappings = num_kernel_image_mappings;
307
308         tb = &trap_block[cpu];
309         tb->hdesc = hdesc;
310
311         hdesc->fault_info_va = (unsigned long) &tb->fault_info;
312         hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
313
314         hdesc->thread_reg = thread_reg;
315
316         tte_vaddr = (unsigned long) KERNBASE;
317         tte_data = kern_locked_tte_data;
318
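        /* Describe one hvtramp_mapping per locked 4MB kernel TTE so that
         * the hypervisor trampoline can map the kernel image on the new
         * cpu before jumping into it.
         */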
319         for (i = 0; i < hdesc->num_mappings; i++) {
320                 hdesc->maps[i].vaddr = tte_vaddr;
321                 hdesc->maps[i].tte   = tte_data;
322                 tte_vaddr += 0x400000;
323                 tte_data  += 0x400000;
324         }
325
326         trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
327
328         hv_err = sun4v_cpu_start(cpu, trampoline_ra,
329                                  kimage_addr_to_ra(&sparc64_ttable_tl0),
330                                  __pa(hdesc));
331         if (hv_err)
332                 printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
333                        "gives error %lu\n", hv_err);
334 }
335 #endif
336
337 extern unsigned long sparc64_cpu_startup;
338
339 /* The OBP cpu startup callback truncates the 3rd arg cookie to
340  * 32-bits (I think) so to be safe we have it read the pointer
341  * contained here so we work on >4GB machines. -DaveM
342  */
343 static struct thread_info *cpu_new_thread = NULL;
344
345 static int __devinit smp_boot_one_cpu(unsigned int cpu)
346 {
347         struct trap_per_cpu *tb = &trap_block[cpu];
348         unsigned long entry =
349                 (unsigned long)(&sparc64_cpu_startup);
350         unsigned long cookie =
351                 (unsigned long)(&cpu_new_thread);
352         struct task_struct *p;
353         int timeout, ret;
354
355         p = fork_idle(cpu);
356         if (IS_ERR(p))
357                 return PTR_ERR(p);
358         callin_flag = 0;
359         cpu_new_thread = task_thread_info(p);
360
361         if (tlb_type == hypervisor) {
362 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
363                 if (ldom_domaining_enabled)
364                         ldom_startcpu_cpuid(cpu,
365                                             (unsigned long) cpu_new_thread);
366                 else
367 #endif
368                         prom_startcpu_cpuid(cpu, entry, cookie);
369         } else {
370                 struct device_node *dp = of_find_node_by_cpuid(cpu);
371
372                 prom_startcpu(dp->node, entry, cookie);
373         }
374
375         for (timeout = 0; timeout < 50000; timeout++) {
376                 if (callin_flag)
377                         break;
378                 udelay(100);
379         }
380
381         if (callin_flag) {
382                 ret = 0;
383         } else {
384                 printk("Processor %d is stuck.\n", cpu);
385                 ret = -ENODEV;
386         }
387         cpu_new_thread = NULL;
388
389         if (tb->hdesc) {
390                 kfree(tb->hdesc);
391                 tb->hdesc = NULL;
392         }
393
394         return ret;
395 }
396
397 static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
398 {
399         u64 result, target;
400         int stuck, tmp;
401
402         if (this_is_starfire) {
403                 /* map to real upaid */
404                 cpu = (((cpu & 0x3c) << 1) |
405                         ((cpu & 0x40) >> 4) |
406                         (cpu & 0x3));
407         }
408
409         target = (cpu << 14) | 0x70;
410 again:
411         /* Ok, this is the real Spitfire Errata #54.
412          * One must read back from a UDB internal register
413          * after writes to the UDB interrupt dispatch, but
414          * before the membar Sync for that write.
415          * So we use the high UDB control register (ASI 0x7f,
416          * ADDR 0x20) for the dummy read. -DaveM
417          */
418         tmp = 0x40;
419         __asm__ __volatile__(
420         "wrpr   %1, %2, %%pstate\n\t"
421         "stxa   %4, [%0] %3\n\t"
422         "stxa   %5, [%0+%8] %3\n\t"
423         "add    %0, %8, %0\n\t"
424         "stxa   %6, [%0+%8] %3\n\t"
425         "membar #Sync\n\t"
426         "stxa   %%g0, [%7] %3\n\t"
427         "membar #Sync\n\t"
428         "mov    0x20, %%g1\n\t"
429         "ldxa   [%%g1] 0x7f, %%g0\n\t"
430         "membar #Sync"
431         : "=r" (tmp)
432         : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
433           "r" (data0), "r" (data1), "r" (data2), "r" (target),
434           "r" (0x10), "0" (tmp)
435         : "g1");
436
437         /* NOTE: PSTATE_IE is still clear. */
438         stuck = 100000;
439         do {
440                 __asm__ __volatile__("ldxa [%%g0] %1, %0"
441                         : "=r" (result)
442                         : "i" (ASI_INTR_DISPATCH_STAT));
443                 if (result == 0) {
444                         __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
445                                              : : "r" (pstate));
446                         return;
447                 }
448                 stuck -= 1;
449                 if (stuck == 0)
450                         break;
451         } while (result & 0x1);
452         __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
453                              : : "r" (pstate));
454         if (stuck == 0) {
455                 printk("CPU[%d]: mondo stuckage result[%016lx]\n",
456                        smp_processor_id(), result);
457         } else {
458                 udelay(2);
459                 goto again;
460         }
461 }
462
463 static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
464 {
465         u64 pstate;
466         int i;
467
468         __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
469         for_each_cpu_mask(i, mask)
470                 spitfire_xcall_helper(data0, data1, data2, pstate, i);
471 }
472
473 /* Cheetah now allows sending the whole 64 bytes of data in the interrupt
474  * packet, but we have no use for that.  However we do take advantage of
475  * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
476  */
477 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
478 {
479         u64 pstate, ver, busy_mask;
480         int nack_busy_id, is_jbus, need_more;
481
482         if (cpus_empty(mask))
483                 return;
484
485         /* Unfortunately, someone at Sun had the brilliant idea to make the
486          * busy/nack fields hard-coded by ITID number for this Ultra-III
487          * derivative processor.
488          */
489         __asm__ ("rdpr %%ver, %0" : "=r" (ver));
490         is_jbus = ((ver >> 32) == __JALAPENO_ID ||
491                    (ver >> 32) == __SERRANO_ID);
492
493         __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
494
495 retry:
496         need_more = 0;
497         __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
498                              : : "r" (pstate), "i" (PSTATE_IE));
499
500         /* Setup the dispatch data registers. */
501         __asm__ __volatile__("stxa      %0, [%3] %6\n\t"
502                              "stxa      %1, [%4] %6\n\t"
503                              "stxa      %2, [%5] %6\n\t"
504                              "membar    #Sync\n\t"
505                              : /* no outputs */
506                              : "r" (data0), "r" (data1), "r" (data2),
507                                "r" (0x40), "r" (0x50), "r" (0x60),
508                                "i" (ASI_INTR_W));
509
510         nack_busy_id = 0;
511         busy_mask = 0;
512         {
513                 int i;
514
515                 for_each_cpu_mask(i, mask) {
516                         u64 target = (i << 14) | 0x70;
517
518                         if (is_jbus) {
519                                 busy_mask |= (0x1UL << (i * 2));
520                         } else {
521                                 target |= (nack_busy_id << 24);
522                                 busy_mask |= (0x1UL <<
523                                               (nack_busy_id * 2));
524                         }
525                         __asm__ __volatile__(
526                                 "stxa   %%g0, [%0] %1\n\t"
527                                 "membar #Sync\n\t"
528                                 : /* no outputs */
529                                 : "r" (target), "i" (ASI_INTR_W));
530                         nack_busy_id++;
531                         if (nack_busy_id == 32) {
532                                 need_more = 1;
533                                 break;
534                         }
535                 }
536         }
537
538         /* Now, poll for completion. */
539         {
540                 u64 dispatch_stat, nack_mask;
541                 long stuck;
542
543                 stuck = 100000 * nack_busy_id;
544                 nack_mask = busy_mask << 1;
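                /* Each dispatched target owns two bits of the dispatch
                 * status register: busy at bit 2*N and nack at bit 2*N+1,
                 * where N is the ITID on JBUS parts and the dispatch slot
                 * otherwise.  Hence the nack mask is just the busy mask
                 * shifted left by one.
                 */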
545                 do {
546                         __asm__ __volatile__("ldxa      [%%g0] %1, %0"
547                                              : "=r" (dispatch_stat)
548                                              : "i" (ASI_INTR_DISPATCH_STAT));
549                         if (!(dispatch_stat & (busy_mask | nack_mask))) {
550                                 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
551                                                      : : "r" (pstate));
552                                 if (unlikely(need_more)) {
553                                         int i, cnt = 0;
554                                         for_each_cpu_mask(i, mask) {
555                                                 cpu_clear(i, mask);
556                                                 cnt++;
557                                                 if (cnt == 32)
558                                                         break;
559                                         }
560                                         goto retry;
561                                 }
562                                 return;
563                         }
564                         if (!--stuck)
565                                 break;
566                 } while (dispatch_stat & busy_mask);
567
568                 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
569                                      : : "r" (pstate));
570
571                 if (dispatch_stat & busy_mask) {
572                         /* Busy bits will not clear, continue instead
573                          * of freezing up on this cpu.
574                          */
575                         printk("CPU[%d]: mondo stuckage result[%016lx]\n",
576                                smp_processor_id(), dispatch_stat);
577                 } else {
578                         int i, this_busy_nack = 0;
579
580                         /* Delay some random time with interrupts enabled
581                          * to prevent deadlock.
582                          */
583                         udelay(2 * nack_busy_id);
584
585                         /* Clear out the mask bits for cpus which did not
586                          * NACK us.
587                          */
588                         for_each_cpu_mask(i, mask) {
589                                 u64 check_mask;
590
591                                 if (is_jbus)
592                                         check_mask = (0x2UL << (2*i));
593                                 else
594                                         check_mask = (0x2UL <<
595                                                       this_busy_nack);
596                                 if ((dispatch_stat & check_mask) == 0)
597                                         cpu_clear(i, mask);
598                                 this_busy_nack += 2;
599                                 if (this_busy_nack == 64)
600                                         break;
601                         }
602
603                         goto retry;
604                 }
605         }
606 }
607
608 /* Multi-cpu list version.  */
609 static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
610 {
611         struct trap_per_cpu *tb;
612         u16 *cpu_list;
613         u64 *mondo;
614         cpumask_t error_mask;
615         unsigned long flags, status;
616         int cnt, retries, this_cpu, prev_sent, i;
617
618         if (cpus_empty(mask))
619                 return;
620
621         /* We have to do this whole thing with interrupts fully disabled.
622          * Otherwise if we send an xcall from interrupt context it will
623          * corrupt both our mondo block and cpu list state.
624          *
625          * One consequence of this is that we cannot use timeout mechanisms
626          * that depend upon interrupts being delivered locally.  So, for
627          * example, we cannot sample jiffies and expect it to advance.
628          *
629          * Fortunately, udelay() uses %stick/%tick so we can use that.
630          */
631         local_irq_save(flags);
632
633         this_cpu = smp_processor_id();
634         tb = &trap_block[this_cpu];
635
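        /* The first three 64-bit words of this cpu's mondo block carry the
         * xcall data; the hypervisor delivers a copy of the block to each
         * target cpu's mondo queue on a successful send.
         */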
636         mondo = __va(tb->cpu_mondo_block_pa);
637         mondo[0] = data0;
638         mondo[1] = data1;
639         mondo[2] = data2;
640         wmb();
641
642         cpu_list = __va(tb->cpu_list_pa);
643
644         /* Setup the initial cpu list.  */
645         cnt = 0;
646         for_each_cpu_mask(i, mask)
647                 cpu_list[cnt++] = i;
648
649         cpus_clear(error_mask);
650         retries = 0;
651         prev_sent = 0;
652         do {
653                 int forward_progress, n_sent;
654
655                 status = sun4v_cpu_mondo_send(cnt,
656                                               tb->cpu_list_pa,
657                                               tb->cpu_mondo_block_pa);
658
659                 /* HV_EOK means all cpus received the xcall, we're done.  */
660                 if (likely(status == HV_EOK))
661                         break;
662
663                 /* First, see if we made any forward progress.
664                  *
665                  * The hypervisor indicates successful sends by setting
666                  * cpu list entries to the value 0xffff.
667                  */
668                 n_sent = 0;
669                 for (i = 0; i < cnt; i++) {
670                         if (likely(cpu_list[i] == 0xffff))
671                                 n_sent++;
672                 }
673
674                 forward_progress = 0;
675                 if (n_sent > prev_sent)
676                         forward_progress = 1;
677
678                 prev_sent = n_sent;
679
680                 /* If we get a HV_ECPUERROR, then one or more of the cpus
681                  * in the list are in error state.  Use the cpu_state()
682                  * hypervisor call to find out which cpus are in error state.
683                  */
684                 if (unlikely(status == HV_ECPUERROR)) {
685                         for (i = 0; i < cnt; i++) {
686                                 long err;
687                                 u16 cpu;
688
689                                 cpu = cpu_list[i];
690                                 if (cpu == 0xffff)
691                                         continue;
692
693                                 err = sun4v_cpu_state(cpu);
694                                 if (err >= 0 &&
695                                     err == HV_CPU_STATE_ERROR) {
696                                         cpu_list[i] = 0xffff;
697                                         cpu_set(cpu, error_mask);
698                                 }
699                         }
700                 } else if (unlikely(status != HV_EWOULDBLOCK))
701                         goto fatal_mondo_error;
702
703                 /* Don't bother rewriting the CPU list, just leave the
704                  * 0xffff and non-0xffff entries in there and the
705                  * hypervisor will do the right thing.
706                  *
707                  * Only advance timeout state if we didn't make any
708                  * forward progress.
709                  */
710                 if (unlikely(!forward_progress)) {
711                         if (unlikely(++retries > 10000))
712                                 goto fatal_mondo_timeout;
713
714                         /* Delay a little bit to let other cpus catch up
715                          * on their cpu mondo queue work.
716                          */
717                         udelay(2 * cnt);
718                 }
719         } while (1);
720
721         local_irq_restore(flags);
722
723         if (unlikely(!cpus_empty(error_mask)))
724                 goto fatal_mondo_cpu_error;
725
726         return;
727
728 fatal_mondo_cpu_error:
729         printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
730                "were in error state\n",
731                this_cpu);
732         printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
733         for_each_cpu_mask(i, error_mask)
734                 printk("%d ", i);
735         printk("]\n");
736         return;
737
738 fatal_mondo_timeout:
739         local_irq_restore(flags);
740         printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
741                "progress after %d retries.\n",
742                this_cpu, retries);
743         goto dump_cpu_list_and_out;
744
745 fatal_mondo_error:
746         local_irq_restore(flags);
747         printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
748                this_cpu, status);
749         printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
750                "mondo_block_pa(%lx)\n",
751                this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
752
753 dump_cpu_list_and_out:
754         printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
755         for (i = 0; i < cnt; i++)
756                 printk("%u ", cpu_list[i]);
757         printk("]\n");
758 }
759
760 /* Send cross call to all processors mentioned in MASK
761  * except self.
762  */
763 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
764 {
765         u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
766         int this_cpu = get_cpu();
767
768         cpus_and(mask, mask, cpu_online_map);
769         cpu_clear(this_cpu, mask);
770
771         if (tlb_type == spitfire)
772                 spitfire_xcall_deliver(data0, data1, data2, mask);
773         else if (tlb_type == cheetah || tlb_type == cheetah_plus)
774                 cheetah_xcall_deliver(data0, data1, data2, mask);
775         else
776                 hypervisor_xcall_deliver(data0, data1, data2, mask);
777         /* NOTE: Caller runs local copy on master. */
778
779         put_cpu();
780 }
781
782 extern unsigned long xcall_sync_tick;
783
784 static void smp_start_sync_tick_client(int cpu)
785 {
786         cpumask_t mask = cpumask_of_cpu(cpu);
787
788         smp_cross_call_masked(&xcall_sync_tick,
789                               0, 0, 0, mask);
790 }
791
792 /* Send cross call to all processors except self. */
793 #define smp_cross_call(func, ctx, data1, data2) \
794         smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
795
796 struct call_data_struct {
797         void (*func) (void *info);
798         void *info;
799         atomic_t finished;
800         int wait;
801 };
802
803 static struct call_data_struct *call_data;
804
805 extern unsigned long xcall_call_function;
806
807 /**
808  * smp_call_function(): Run a function on all other CPUs.
809  * @func: The function to run. This must be fast and non-blocking.
810  * @info: An arbitrary pointer to pass to the function.
811  * @nonatomic: currently unused.
812  * @wait: If true, wait (atomically) until function has completed on other CPUs.
813  *
814  * Returns 0 on success, else a negative status code. Does not return until
815  * remote CPUs are nearly ready to execute <<func>> or have already executed it.
816  *
817  * You must not call this function with disabled interrupts or from a
818  * hardware interrupt handler or from a bottom half handler.
819  */
820 static int smp_call_function_mask(void (*func)(void *info), void *info,
821                                   int nonatomic, int wait, cpumask_t mask)
822 {
823         struct call_data_struct data;
824         int cpus;
825
826         /* Can deadlock when called with interrupts disabled */
827         WARN_ON(irqs_disabled());
828
829         data.func = func;
830         data.info = info;
831         atomic_set(&data.finished, 0);
832         data.wait = wait;
833
834         spin_lock(&call_lock);
835
836         cpu_clear(smp_processor_id(), mask);
837         cpus = cpus_weight(mask);
838         if (!cpus)
839                 goto out_unlock;
840
841         call_data = &data;
842         mb();
843
844         smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
845
846         /* Wait for response */
847         while (atomic_read(&data.finished) != cpus)
848                 cpu_relax();
849
850 out_unlock:
851         spin_unlock(&call_lock);
852
853         return 0;
854 }
855
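/* A typical caller might run a quick handler on every other cpu and wait
 * for it to finish, along these lines (illustrative sketch only):
 *
 *	static void bump(void *info)
 *	{
 *		atomic_inc((atomic_t *) info);
 *	}
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *	...
 *	smp_call_function(bump, &hits, 0, 1);
 *
 * The handler runs from interrupt context on the remote cpus, so it must
 * be fast and must not sleep.
 */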
856 int smp_call_function(void (*func)(void *info), void *info,
857                       int nonatomic, int wait)
858 {
859         return smp_call_function_mask(func, info, nonatomic, wait,
860                                       cpu_online_map);
861 }
862
863 void smp_call_function_client(int irq, struct pt_regs *regs)
864 {
865         void (*func) (void *info) = call_data->func;
866         void *info = call_data->info;
867
868         clear_softint(1 << irq);
869
870         irq_enter();
871
872         if (!call_data->wait) {
873                 /* let initiator proceed after getting data */
874                 atomic_inc(&call_data->finished);
875         }
876
877         func(info);
878
879         irq_exit();
880
881         if (call_data->wait) {
882                 /* let initiator proceed only after completion */
883                 atomic_inc(&call_data->finished);
884         }
885 }
886
887 static void tsb_sync(void *info)
888 {
889         struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
890         struct mm_struct *mm = info;
891
892         /* It is not valid to test "current->active_mm == mm" here.
893          *
894          * The value of "current" is not changed atomically with
895          * switch_mm().  But that's OK, we just need to check the
896          * current cpu's trap block PGD physical address.
897          */
898         if (tp->pgd_paddr == __pa(mm->pgd))
899                 tsb_context_switch(mm);
900 }
901
902 void smp_tsb_sync(struct mm_struct *mm)
903 {
904         smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
905 }
906
907 extern unsigned long xcall_flush_tlb_mm;
908 extern unsigned long xcall_flush_tlb_pending;
909 extern unsigned long xcall_flush_tlb_kernel_range;
910 extern unsigned long xcall_report_regs;
911 extern unsigned long xcall_receive_signal;
912 extern unsigned long xcall_new_mmu_context_version;
913
914 #ifdef DCACHE_ALIASING_POSSIBLE
915 extern unsigned long xcall_flush_dcache_page_cheetah;
916 #endif
917 extern unsigned long xcall_flush_dcache_page_spitfire;
918
919 #ifdef CONFIG_DEBUG_DCFLUSH
920 extern atomic_t dcpage_flushes;
921 extern atomic_t dcpage_flushes_xcall;
922 #endif
923
924 static inline void __local_flush_dcache_page(struct page *page)
925 {
926 #ifdef DCACHE_ALIASING_POSSIBLE
927         __flush_dcache_page(page_address(page),
928                             ((tlb_type == spitfire) &&
929                              page_mapping(page) != NULL));
930 #else
931         if (page_mapping(page) != NULL &&
932             tlb_type == spitfire)
933                 __flush_icache_page(__pa(page_address(page)));
934 #endif
935 }
936
937 void smp_flush_dcache_page_impl(struct page *page, int cpu)
938 {
939         cpumask_t mask = cpumask_of_cpu(cpu);
940         int this_cpu;
941
942         if (tlb_type == hypervisor)
943                 return;
944
945 #ifdef CONFIG_DEBUG_DCFLUSH
946         atomic_inc(&dcpage_flushes);
947 #endif
948
949         this_cpu = get_cpu();
950
951         if (cpu == this_cpu) {
952                 __local_flush_dcache_page(page);
953         } else if (cpu_online(cpu)) {
954                 void *pg_addr = page_address(page);
955                 u64 data0;
956
957                 if (tlb_type == spitfire) {
958                         data0 =
959                                 ((u64)&xcall_flush_dcache_page_spitfire);
960                         if (page_mapping(page) != NULL)
961                                 data0 |= ((u64)1 << 32);
962                         spitfire_xcall_deliver(data0,
963                                                __pa(pg_addr),
964                                                (u64) pg_addr,
965                                                mask);
966                 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
967 #ifdef DCACHE_ALIASING_POSSIBLE
968                         data0 =
969                                 ((u64)&xcall_flush_dcache_page_cheetah);
970                         cheetah_xcall_deliver(data0,
971                                               __pa(pg_addr),
972                                               0, mask);
973 #endif
974                 }
975 #ifdef CONFIG_DEBUG_DCFLUSH
976                 atomic_inc(&dcpage_flushes_xcall);
977 #endif
978         }
979
980         put_cpu();
981 }
982
983 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
984 {
985         void *pg_addr = page_address(page);
986         cpumask_t mask = cpu_online_map;
987         u64 data0;
988         int this_cpu;
989
990         if (tlb_type == hypervisor)
991                 return;
992
993         this_cpu = get_cpu();
994
995         cpu_clear(this_cpu, mask);
996
997 #ifdef CONFIG_DEBUG_DCFLUSH
998         atomic_inc(&dcpage_flushes);
999 #endif
1000         if (cpus_empty(mask))
1001                 goto flush_self;
1002         if (tlb_type == spitfire) {
1003                 data0 = ((u64)&xcall_flush_dcache_page_spitfire);
1004                 if (page_mapping(page) != NULL)
1005                         data0 |= ((u64)1 << 32);
1006                 spitfire_xcall_deliver(data0,
1007                                        __pa(pg_addr),
1008                                        (u64) pg_addr,
1009                                        mask);
1010         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1011 #ifdef DCACHE_ALIASING_POSSIBLE
1012                 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
1013                 cheetah_xcall_deliver(data0,
1014                                       __pa(pg_addr),
1015                                       0, mask);
1016 #endif
1017         }
1018 #ifdef CONFIG_DEBUG_DCFLUSH
1019         atomic_inc(&dcpage_flushes_xcall);
1020 #endif
1021  flush_self:
1022         __local_flush_dcache_page(page);
1023
1024         put_cpu();
1025 }
1026
1027 static void __smp_receive_signal_mask(cpumask_t mask)
1028 {
1029         smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
1030 }
1031
1032 void smp_receive_signal(int cpu)
1033 {
1034         cpumask_t mask = cpumask_of_cpu(cpu);
1035
1036         if (cpu_online(cpu))
1037                 __smp_receive_signal_mask(mask);
1038 }
1039
1040 void smp_receive_signal_client(int irq, struct pt_regs *regs)
1041 {
1042         irq_enter();
1043         clear_softint(1 << irq);
1044         irq_exit();
1045 }
1046
1047 void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
1048 {
1049         struct mm_struct *mm;
1050         unsigned long flags;
1051
1052         irq_enter();
1053
1054         clear_softint(1 << irq);
1055
1056         /* See if we need to allocate a new TLB context because
1057          * the version of the one we are using is now out of date.
1058          */
1059         mm = current->active_mm;
1060         if (unlikely(!mm || (mm == &init_mm)))
1061                 return;
1062
1063         spin_lock_irqsave(&mm->context.lock, flags);
1064
1065         if (unlikely(!CTX_VALID(mm->context)))
1066                 get_new_mmu_context(mm);
1067
1068         spin_unlock_irqrestore(&mm->context.lock, flags);
1069
1070         load_secondary_context(mm);
1071         __flush_tlb_mm(CTX_HWBITS(mm->context),
1072                        SECONDARY_CONTEXT);
1073
1074         irq_exit();
1075 }
1076
1077 void smp_new_mmu_context_version(void)
1078 {
1079         smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
1080 }
1081
1082 void smp_report_regs(void)
1083 {
1084         smp_cross_call(&xcall_report_regs, 0, 0, 0);
1085 }
1086
1087 /* We know that the window frames of the user have been flushed
1088  * to the stack before we get here because all callers of us
1089  * are flush_tlb_*() routines, and these run after flush_cache_*()
1090  * which performs the flushw.
1091  *
1092  * The SMP TLB coherency scheme we use works as follows:
1093  *
1094  * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
1095  *    space has (potentially) executed on, this is the heuristic
1096  *    we use to avoid doing cross calls.
1097  *
1098  *    Also, for flushing from kswapd and also for clones, we
1099  *    use cpu_vm_mask as the list of cpus on which to run the TLB flush.
1100  *
1101  * 2) TLB context numbers are shared globally across all processors
1102  *    in the system, this allows us to play several games to avoid
1103  *    cross calls.
1104  *
1105  *    One invariant is that when a cpu switches to a process, and
1106  *    that process's tsk->active_mm->cpu_vm_mask does not have the
1107  *    current cpu's bit set, that tlb context is flushed locally.
1108  *
1109  *    If the address space is non-shared (ie. mm->count == 1) we avoid
1110  *    cross calls when we want to flush the currently running process's
1111  *    tlb state.  This is done by clearing all cpu bits except the current
1112  *    processor's in current->active_mm->cpu_vm_mask and performing the
1113  *    flush locally only.  This will force any subsequent cpus which run
1114  *    this task to flush the context from the local tlb if the process
1115  *    migrates to another cpu (again).
1116  *
1117  * 3) For shared address spaces (threads) and swapping we bite the
1118  *    bullet for most cases and perform the cross call (but only to
1119  *    the cpus listed in cpu_vm_mask).
1120  *
1121  *    The performance gain from "optimizing" away the cross call for threads is
1122  *    questionable (in theory the big win for threads is the massive sharing of
1123  *    address space state across processors).
1124  */
1125
1126 /* This currently is only used by the hugetlb arch pre-fault
1127  * hook on UltraSPARC-III+ and later when changing the pagesize
1128  * bits of the context register for an address space.
1129  */
1130 void smp_flush_tlb_mm(struct mm_struct *mm)
1131 {
1132         u32 ctx = CTX_HWBITS(mm->context);
1133         int cpu = get_cpu();
1134
1135         if (atomic_read(&mm->mm_users) == 1) {
1136                 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1137                 goto local_flush_and_out;
1138         }
1139
1140         smp_cross_call_masked(&xcall_flush_tlb_mm,
1141                               ctx, 0, 0,
1142                               mm->cpu_vm_mask);
1143
1144 local_flush_and_out:
1145         __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1146
1147         put_cpu();
1148 }
1149
1150 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1151 {
1152         u32 ctx = CTX_HWBITS(mm->context);
1153         int cpu = get_cpu();
1154
1155         if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
1156                 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1157         else
1158                 smp_cross_call_masked(&xcall_flush_tlb_pending,
1159                                       ctx, nr, (unsigned long) vaddrs,
1160                                       mm->cpu_vm_mask);
1161
1162         __flush_tlb_pending(ctx, nr, vaddrs);
1163
1164         put_cpu();
1165 }
1166
1167 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1168 {
1169         start &= PAGE_MASK;
1170         end    = PAGE_ALIGN(end);
1171         if (start != end) {
1172                 smp_cross_call(&xcall_flush_tlb_kernel_range,
1173                                0, start, end);
1174
1175                 __flush_tlb_kernel_range(start, end);
1176         }
1177 }
1178
1179 /* CPU capture. */
1180 /* #define CAPTURE_DEBUG */
1181 extern unsigned long xcall_capture;
1182
1183 static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1184 static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1185 static unsigned long penguins_are_doing_time;
1186
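/* smp_capture() and smp_release() bracket code that must run with every
 * other cpu parked in smp_penguin_jailcell(), most notably while the
 * PROM is made active.  Captures nest via smp_capture_depth; only the
 * outermost capture/release actually rounds the cpus up and lets them go.
 */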
1187 void smp_capture(void)
1188 {
1189         int result = atomic_add_ret(1, &smp_capture_depth);
1190
1191         if (result == 1) {
1192                 int ncpus = num_online_cpus();
1193
1194 #ifdef CAPTURE_DEBUG
1195                 printk("CPU[%d]: Sending penguins to jail...",
1196                        smp_processor_id());
1197 #endif
1198                 penguins_are_doing_time = 1;
1199                 membar_storestore_loadstore();
1200                 atomic_inc(&smp_capture_registry);
1201                 smp_cross_call(&xcall_capture, 0, 0, 0);
1202                 while (atomic_read(&smp_capture_registry) != ncpus)
1203                         rmb();
1204 #ifdef CAPTURE_DEBUG
1205                 printk("done\n");
1206 #endif
1207         }
1208 }
1209
1210 void smp_release(void)
1211 {
1212         if (atomic_dec_and_test(&smp_capture_depth)) {
1213 #ifdef CAPTURE_DEBUG
1214                 printk("CPU[%d]: Giving pardon to "
1215                        "imprisoned penguins\n",
1216                        smp_processor_id());
1217 #endif
1218                 penguins_are_doing_time = 0;
1219                 membar_storeload_storestore();
1220                 atomic_dec(&smp_capture_registry);
1221         }
1222 }
1223
1224 /* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
1225  * can service tlb flush xcalls...
1226  */
1227 extern void prom_world(int);
1228
1229 void smp_penguin_jailcell(int irq, struct pt_regs *regs)
1230 {
1231         clear_softint(1 << irq);
1232
1233         irq_enter();
1234
1235         preempt_disable();
1236
1237         __asm__ __volatile__("flushw");
1238         prom_world(1);
1239         atomic_inc(&smp_capture_registry);
1240         membar_storeload_storestore();
1241         while (penguins_are_doing_time)
1242                 rmb();
1243         atomic_dec(&smp_capture_registry);
1244         prom_world(0);
1245
1246         preempt_enable();
1247
1248         irq_exit();
1249 }
1250
1251 /* /proc/profile writes can call this, don't __init it please. */
1252 int setup_profiling_timer(unsigned int multiplier)
1253 {
1254         return -EINVAL;
1255 }
1256
1257 void __init smp_prepare_cpus(unsigned int max_cpus)
1258 {
1259 }
1260
1261 void __devinit smp_prepare_boot_cpu(void)
1262 {
1263 }
1264
1265 void __devinit smp_fill_in_sib_core_maps(void)
1266 {
1267         unsigned int i;
1268
1269         for_each_present_cpu(i) {
1270                 unsigned int j;
1271
1272                 cpus_clear(cpu_core_map[i]);
1273                 if (cpu_data(i).core_id == 0) {
1274                         cpu_set(i, cpu_core_map[i]);
1275                         continue;
1276                 }
1277
1278                 for_each_present_cpu(j) {
1279                         if (cpu_data(i).core_id ==
1280                             cpu_data(j).core_id)
1281                                 cpu_set(j, cpu_core_map[i]);
1282                 }
1283         }
1284
1285         for_each_present_cpu(i) {
1286                 unsigned int j;
1287
1288                 cpus_clear(per_cpu(cpu_sibling_map, i));
1289                 if (cpu_data(i).proc_id == -1) {
1290                         cpu_set(i, per_cpu(cpu_sibling_map, i));
1291                         continue;
1292                 }
1293
1294                 for_each_present_cpu(j) {
1295                         if (cpu_data(i).proc_id ==
1296                             cpu_data(j).proc_id)
1297                                 cpu_set(j, per_cpu(cpu_sibling_map, i));
1298                 }
1299         }
1300 }
1301
1302 int __cpuinit __cpu_up(unsigned int cpu)
1303 {
1304         int ret = smp_boot_one_cpu(cpu);
1305
1306         if (!ret) {
1307                 cpu_set(cpu, smp_commenced_mask);
1308                 while (!cpu_isset(cpu, cpu_online_map))
1309                         mb();
1310                 if (!cpu_isset(cpu, cpu_online_map)) {
1311                         ret = -ENODEV;
1312                 } else {
1313                         /* On SUN4V, writes to %tick and %stick are
1314                          * not allowed.
1315                          */
1316                         if (tlb_type != hypervisor)
1317                                 smp_synchronize_one_tick(cpu);
1318                 }
1319         }
1320         return ret;
1321 }
1322
1323 #ifdef CONFIG_HOTPLUG_CPU
1324 void cpu_play_dead(void)
1325 {
1326         int cpu = smp_processor_id();
1327         unsigned long pstate;
1328
1329         idle_task_exit();
1330
1331         if (tlb_type == hypervisor) {
1332                 struct trap_per_cpu *tb = &trap_block[cpu];
1333
1334                 sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
1335                                 tb->cpu_mondo_pa, 0);
1336                 sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
1337                                 tb->dev_mondo_pa, 0);
1338                 sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
1339                                 tb->resum_mondo_pa, 0);
1340                 sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
1341                                 tb->nonresum_mondo_pa, 0);
1342         }
1343
1344         cpu_clear(cpu, smp_commenced_mask);
1345         membar_safe("#Sync");
1346
1347         local_irq_disable();
1348
1349         __asm__ __volatile__(
1350                 "rdpr   %%pstate, %0\n\t"
1351                 "wrpr   %0, %1, %%pstate"
1352                 : "=r" (pstate)
1353                 : "i" (PSTATE_IE));
1354
1355         while (1)
1356                 barrier();
1357 }
1358
1359 int __cpu_disable(void)
1360 {
1361         int cpu = smp_processor_id();
1362         cpuinfo_sparc *c;
1363         int i;
1364
1365         for_each_cpu_mask(i, cpu_core_map[cpu])
1366                 cpu_clear(cpu, cpu_core_map[i]);
1367         cpus_clear(cpu_core_map[cpu]);
1368
1369         for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
1370                 cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
1371         cpus_clear(per_cpu(cpu_sibling_map, cpu));
1372
1373         c = &cpu_data(cpu);
1374
1375         c->core_id = 0;
1376         c->proc_id = -1;
1377
1378         spin_lock(&call_lock);
1379         cpu_clear(cpu, cpu_online_map);
1380         spin_unlock(&call_lock);
1381
1382         smp_wmb();
1383
1384         /* Make sure no interrupts point to this cpu.  */
1385         fixup_irqs();
1386
1387         local_irq_enable();
1388         mdelay(1);
1389         local_irq_disable();
1390
1391         return 0;
1392 }
1393
1394 void __cpu_die(unsigned int cpu)
1395 {
1396         int i;
1397
1398         for (i = 0; i < 100; i++) {
1399                 smp_rmb();
1400                 if (!cpu_isset(cpu, smp_commenced_mask))
1401                         break;
1402                 msleep(100);
1403         }
1404         if (cpu_isset(cpu, smp_commenced_mask)) {
1405                 printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1406         } else {
1407 #if defined(CONFIG_SUN_LDOMS)
1408                 unsigned long hv_err;
1409                 int limit = 100;
1410
1411                 do {
1412                         hv_err = sun4v_cpu_stop(cpu);
1413                         if (hv_err == HV_EOK) {
1414                                 cpu_clear(cpu, cpu_present_map);
1415                                 break;
1416                         }
1417                 } while (--limit > 0);
1418                 if (limit <= 0) {
1419                         printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
1420                                hv_err);
1421                 }
1422 #endif
1423         }
1424 }
1425 #endif
1426
1427 void __init smp_cpus_done(unsigned int max_cpus)
1428 {
1429 }
1430
1431 void smp_send_reschedule(int cpu)
1432 {
1433         smp_receive_signal(cpu);
1434 }
1435
1436 /* This is a nop because we capture all other cpus
1437  * anyway when making the PROM active.
1438  */
1439 void smp_send_stop(void)
1440 {
1441 }
1442
1443 unsigned long __per_cpu_base __read_mostly;
1444 unsigned long __per_cpu_shift __read_mostly;
1445
1446 EXPORT_SYMBOL(__per_cpu_base);
1447 EXPORT_SYMBOL(__per_cpu_shift);
1448
1449 void __init real_setup_per_cpu_areas(void)
1450 {
1451         unsigned long paddr, goal, size, i;
1452         char *ptr;
1453
1454         /* Copy section for each CPU (we discard the original) */
1455         goal = PERCPU_ENOUGH_ROOM;
1456
1457         __per_cpu_shift = PAGE_SHIFT;
1458         for (size = PAGE_SIZE; size < goal; size <<= 1UL)
1459                 __per_cpu_shift++;
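        /* size is now the per-cpu stride, rounded up to a power of two, so
         * a cpu's area can be located as __per_cpu_base plus the cpu number
         * shifted left by __per_cpu_shift.
         */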
1460
1461         paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
1462         if (!paddr) {
1463                 prom_printf("Cannot allocate per-cpu memory.\n");
1464                 prom_halt();
1465         }
1466
1467         ptr = __va(paddr);
1468         __per_cpu_base = ptr - __per_cpu_start;
1469
1470         for (i = 0; i < NR_CPUS; i++, ptr += size)
1471                 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
1472
1473         /* Setup %g5 for the boot cpu.  */
1474         __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
1475 }