/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999,2007
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);

static struct task_struct *current_set[NR_CPUS];

static void smp_ext_bitcall(int, ec_bit_sig);
/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	cpumask_t started;
	cpumask_t finished;
	int wait;
};

static struct call_data_struct *call_data;
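/*
 * Calling protocol, summarized from the code below: the sender fills a
 * call_data_struct under call_lock, points call_data at it, signals the
 * target cpus and then spins until each of them has checked in to the
 * "started" (and, when waiting, "finished") cpumasks.
 */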
/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
		cpu_set(smp_processor_id(), call_data->finished);
}
static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context.
	 */
	WARN_ON(irqs_disabled() || in_irq());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	spin_lock(&call_lock);
	call_data = &data;

	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();
	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();
	spin_unlock(&call_lock);
out:
	local_irq_disable();
	if (local)
		func(info);
	local_irq_enable();
}
/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on all other CPUs.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	cpumask_t map;

	preempt_disable();
	map = cpu_online_map;
	cpu_clear(smp_processor_id(), map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
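/*
 * Usage sketch (illustrative only, not part of the original file):
 *
 *	static void do_flush(void *info)
 *	{
 *		...	// runs on each target cpu in interrupt context
 *	}
 *
 *	smp_call_function(do_flush, NULL, 0, 1);
 *
 * would run do_flush() on all other online cpus and wait until every
 * one of them has completed it.
 */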
/*
 * smp_call_function_single:
 * @cpu: the CPU where func should run
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on the
 *        target CPU
 *
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int nonatomic, int wait)
{
	preempt_disable();
	__smp_call_function_map(func, info, nonatomic, wait,
				cpumask_of_cpu(cpu));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
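/*
 * The stop/store-status helpers below retry their sigp order while the
 * addressed cpu returns condition code "busy", i.e. while it has not
 * yet accepted a previously issued order.
 */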
static void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}
static void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}
static void do_wait_for_stop(void)
{
	int cpu;

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}
/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* wait until other processors are stopped */
	do_wait_for_stop();

	/* store status of other processors. */
	do_store_status();
}
/*
 * Reboot, halt and power_off routines for SMP.
 */
void machine_restart_smp(char *__unused)
{
	smp_send_stop();
	do_reipl();
}

void machine_halt_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

void machine_power_off_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};
/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}
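/*
 * In the helpers below each control register is first and'ed with its
 * "andvals" mask and then or'ed with its "orvals" mask, so one
 * broadcast can clear bits (0 in andvals) or set bits (1 in orvals).
 */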
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);
/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
 * save its prefix registers, since they get lost, when switching from 31 bit
 * to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
	__attribute__((__section__(".data")));

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (cpu >= NR_CPUS) {
		printk(KERN_WARNING "Registers for cpu %i not saved since dump "
		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
		return;
	}
	zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area));
	__cpu_logical_map[1] = (__u16) phy_cpu;
	while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy)
		cpu_relax();
	memcpy(zfcpdump_save_areas[cpu],
	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
	       SAVE_AREA_SIZE);
#ifdef CONFIG_64BIT
	/* copy original prefix register */
	zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
/*
 * Let's check how many CPUs we have.
 */
static unsigned int __init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) == sigp_not_operational)
			continue;
		smp_get_save_area(num_cpus, cpu);
		num_cpus++;
	}
	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);
	return num_cpus;
}
/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}
static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}
static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}
/* Upping and downing of CPUs */
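/*
 * Bring-up sequence in __cpu_up() below: find a stopped physical cpu
 * for the logical cpu, point its prefix register at the prepared
 * lowcore via sigp_set_prefix, seed that lowcore with the idle task's
 * kernel stack and register save areas, and finally issue sigp_restart
 * so the cpu resumes execution and ends up in start_secondary().
 */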
int __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;

void __init smp_setup_cpu_possible_map(void)
{
	unsigned int phy_cpus, pos_cpus, cpu;

	phy_cpus = smp_count_cpus();
	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	phy_cpus = min(phy_cpus, pos_cpus);

	for (cpu = 0; cpu < phy_cpus; cpu++)
		cpu_set(cpu, cpu_present_map);
}
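/*
 * The early_params below feed the calculation above: "additional_cpus"
 * enlarges the possible map beyond the detected cpus to leave room for
 * cpu hotplug, while "possible_cpus", when given, overrides the
 * computed value entirely. Both results are capped at NR_CPUS.
 */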
#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
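/*
 * Note for __cpu_disable() below: the bits cleared in control
 * registers 0, 6 and 14 are the external-interruption, I/O-interruption
 * and machine-check subclass mask bits, so the outgoing cpu stops
 * accepting asynchronous interrupts before it is taken down.
 */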
int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
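/*
 * Note for smp_prepare_cpus() below: the lowcore (prefix) area covers
 * 4KB on 31 bit but 8KB on 64 bit, hence the order-1 page allocation
 * when sizeof(void *) == 8.
 */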
/*
 *	Cycle through the processors and setup structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void*) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (!lowcore_ptr[i] || !stack)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (!stack)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (!lowcore_ptr[i]->extended_save_area_addr)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}
void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
static DEFINE_PER_CPU(struct cpu, cpu_devices);

static ssize_t show_capability(struct sys_device *dev, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (sysdev_create_file(s, &attr_capability))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysdev_remove_file(s, &attr_capability);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};
static int __init topology_init(void)
{
	int cpu;

	register_cpu_notifier(&smp_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);
		struct sys_device *s = &c->sysdev;

		c->hotpluggable = 1;
		register_cpu(c, cpu);
		if (!cpu_online(cpu))
			continue;
		sysdev_create_file(s, &attr_capability);
	}
	return 0;
}
subsys_initcall(topology_init);