/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *	2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *		Renamed the modified smp_call_function to smp_call_function_on_cpu().
 *		Created a function that conforms to the old calling convention
 *		of smp_call_function().
 *
 *		This is helpful for DCPI.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

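/*
 * Editor's illustration (a sketch, not part of the original source):
 * each ipi_message_type above is a bit index into ipi_data[cpu].bits,
 * so several pending messages can be ORed into one word and drained
 * atomically.  Posting and draining look roughly like this (see
 * send_ipi_message() and handle_ipi() below for the real thing):
 *
 *	set_bit(IPI_RESCHEDULE, &ipi_data[cpu].bits);	// post one message
 *	wripir(cpu);					// interrupt the target
 *
 *	ops = xchg(pending_ipis, 0);			// drain all at once
 *	while (ops) {
 *		which = ops & -ops;			// isolate lowest set bit
 *		ops &= ~which;
 *		// ... dispatch on __ffs(which) ...
 *	}
 */
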
/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __initdata = 0;

/* Which cpu ids came online.  */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */

extern void calibrate_delay(void);

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going.  */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified.  */
	if (alpha_mv.smp_callin)
		alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Must have completely accurate bogos.  */
	local_irq_disable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we've written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __init
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __init
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}

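/*
 * Editor's note on the ipc_buffer layout assumed above (a sketch, not
 * normative): the outgoing byte count is stored in the low bits of
 * quadword 0 and the text begins at quadword 1.  For "START\r\n":
 *
 *	*(unsigned int *)&cpu->ipc_buffer[0] = 7;	// byte count
 *	memcpy((char *)&cpu->ipc_buffer[1], "START\r\n", 7);
 *
 * Replies travel the other way: the console puts its byte count in the
 * high 32 bits of quadword 0, which is why recv_secondary_console_msg()
 * below extracts it with ">> 32".
 */
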
/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
			((char*)hwrpb
			 + hwrpb->processor_offset
			 + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}

/*
 * Bring one cpu online.
 */
static int __init
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}

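/*
 * Editor's sketch of the smp_secondary_alive handshake used above and
 * in smp_callin() (timeline reconstructed from the code, not original
 * documentation):
 *
 *	boot CPU				secondary
 *	--------				---------
 *	smp_secondary_alive = -1;
 *	secondary_cpu_start(cpuid, idle);	spins in wait_boot_cpu_to_stop()
 *	mb(); smp_secondary_alive = 0;		sees 0; runs calibrate_delay()
 *	polls for smp_secondary_alive == 1	wmb(); smp_secondary_alive = 1;
 *	goes on to boot the next CPU		enters cpu_idle()
 */
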
/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * can be controlled.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	int i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				/* Assume here that "whami" == index */
				cpu_set(i, cpu_present_map);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
	       smp_num_probed, cpu_present_map.bits[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		cpu_present_map = cpumask_of_cpu(boot_cpuid);
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

void __devinit
smp_prepare_boot_cpu(void)
{
}

int __devinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}

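/*
 * Editor's worked example of the BogoMIPS arithmetic above (assumed
 * values, for illustration only): bogosum is in loops per jiffy, and
 * one BogoMIPS is 500000 loops per second, i.e. 500000/HZ loops per
 * jiffy; the +2500 is a rounding bias.  With HZ = 1024 and
 * bogosum = 2441406:
 *
 *	(2441406 + 2500) / (500000/1024)        = 2443906 / 488     = 5008
 *	((2441406 + 2500) / (5000/1024)) % 100  = (2443906 / 4) % 100 = 76
 *
 * so the line prints "5008.76 BogoMIPS".  The truncating integer
 * divisions (500000/1024 = 488, 5000/1024 = 4) make the printed value
 * only approximate when HZ does not divide 500000 evenly.
 */
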
void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	old_regs = set_irq_regs(regs);

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
	set_irq_regs(old_regs);
}

int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}

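/*
 * Editor's sketch of the barrier pairing assumed here (reconstructed
 * from the mb() comments in handle_ipi() below, not original
 * documentation): the first mb() orders the caller's data stores
 * before the message bit becomes visible; the second orders the bit
 * stores before wripir() raises the interrupt on the target:
 *
 *	sender					receiver (handle_ipi)
 *	------					---------------------
 *	<store shared data>
 *	mb();
 *	set_bit(operation, &ipi_data[i].bits);
 *	mb();					ops = xchg(pending_ipis, 0);
 *	wripir(i);				mb();
 *						<read shared data>
 */
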
/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially zero (unlocked).  If retry, spin until free.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");

	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		barrier();
	goto again;
}

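/*
 * Editor's sketch (not in the original source): the LL/SC sequence
 * above behaves like a compare-and-swap of NULL -> data.  An
 * equivalent using the kernel's cmpxchg() would be roughly:
 *
 *	static int pointer_lock(void *lock, void *data, int retry)
 *	{
 *		for (;;) {
 *			if (cmpxchg((void **)lock, NULL, data) == NULL)
 *				return 0;		// we own the slot
 *			if (!retry)
 *				return -EBUSY;
 *			while (*(void **)lock)		// spin until free
 *				barrier();
 *		}
 *	}
 *
 * (The real version also issues an mb() before the swap; any
 * replacement would need equivalent ordering.)
 */
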
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ops & -ops;
			ops &= ~which;
			which = __ffs(which);

			switch (which) {
			case IPI_RESCHEDULE:
				/* Reschedule callback.  Everything to be done
				   is done by the interrupt return path.  */
				break;

			case IPI_CALL_FUNC:
			    {
				struct smp_call_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				data = smp_call_function_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				/* Notify the sending CPU that the data has been
				   received, and execution is about to begin.  */
				mb();
				atomic_dec (&data->unstarted_count);

				/* At this point the structure may be gone unless
				   wait is true.  */
				(*func)(info);

				/* Notify the sending CPU that the task is done.  */
				mb();
				if (wait) atomic_dec (&data->unfinished_count);
				break;
			    }

			case IPI_CPU_STOP:
				halt();

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);

		mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>,
 * or have already executed it.
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
			  int wait, cpumask_t to_whom)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int num_cpus_to_call;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), to_whom);
	num_cpus_to_call = cpus_weight(to_whom);

	atomic_set(&data.unstarted_count, num_cpus_to_call);
	atomic_set(&data.unfinished_count, num_cpus_to_call);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to the requested CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* If there's no response yet, log a message but allow a longer
	 * timeout period -- if we get a response this time, log
	 * a message saying when we got it.
	 */
	if (atomic_read(&data.unstarted_count) > 0) {
		long start_time = jiffies;
		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
		       __FUNCTION__);
		timeout = jiffies + 30 * HZ;
		while (atomic_read(&data.unstarted_count) > 0
		       && time_before(jiffies, timeout))
			barrier();
		if (atomic_read(&data.unstarted_count) <= 0) {
			long delta = jiffies - start_time;
			printk(KERN_ERR
			       "%s: response %ld.%ld seconds into long wait\n",
			       __FUNCTION__, delta / HZ,
			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
		}
	}

	/* We either got one or timed out -- clear the lock.  */
	mb();
	smp_call_function_data = NULL;

	/*
	 * If after both the initial and long timeout periods we still don't
	 * have a response, something is very wrong...
	 */
	BUG_ON(atomic_read (&data.unstarted_count) > 0);

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	return smp_call_function_on_cpu (func, info, retry, wait,
					 cpu_online_map);
}

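/*
 * Editor's usage sketch (a hypothetical caller, not part of this file):
 *
 *	static void ipi_drain_counters(void *info)
 *	{
 *		atomic_t *total = info;
 *		atomic_add(local_counter(), total);	// fast, non-blocking
 *	}
 *
 *	static int drain_all(atomic_t *total)
 *	{
 *		// retry=1: spin for the smp_call_function_data slot;
 *		// wait=1: do not return until every CPU has run the hook.
 *		return smp_call_function(ipi_drain_counters, total, 1, 1);
 *	}
 *
 * local_counter() is a made-up helper; the essential constraint is that
 * the callback runs from handle_ipi() in interrupt context on the
 * remote CPUs, so it must not block.
 */
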
static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for other processors to flush their icache before
	   continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}