/*
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>  IA64-SMP functions. Reorganized
 *          the existing code (on the lines of x86 port).
 * 00/09/11 David Mosberger <davidm@hpl.hp.com>  Do loops_per_jiffy
 *          calibration on each CPU.
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com>  fixed logical processor id
 * 00/03/31 Rohit Seth <rohit.seth@intel.com>  Fixes for Bootstrap Processor
 *          & cpu_online_map now gets done here (instead of setup.c)
 * 99/10/05 davidm  Update to bring it in sync with new command-line processing
 * 10/13/00 Goutham Rao <goutham.rao@intel.com>  Updated smp_call_function and
 *          smp_call_function_single to resend IPI on timeouts
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
/*
 * Note: alignment of 4 entries/cacheline was empirically determined
 * to be a good tradeoff between hot cachelines & spreading the array
 * across too many cachelines.
 */
static struct local_tlb_flush_counts {
        unsigned int count;
} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];

static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
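/*
 * Illustration of the tradeoff above (the 128-byte line size is an assumption
 * about typical IA-64 L2/L3 caches, not something this file states): the
 * aligned attribute pads each entry to 32 bytes, so 128 / 32 = 4 counters
 * share one cacheline, which is the "4 entries/cacheline" figure mentioned
 * in the comment.
 */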
/*
 * Structure and data for smp_call_function(). This is designed to minimise static memory
 * requirements. It also looks cleaner.
 */
static __cacheline_aligned DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        long wait;
        atomic_t started;
        atomic_t finished;
};

static volatile struct call_data_struct *call_data;
#define IPI_CALL_FUNC           0
#define IPI_CPU_STOP            1
#define IPI_KDUMP_CPU_STOP      3

/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
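/*
 * Sketch of the delivery protocol built on ipi_operation (descriptive note
 * only): a sender sets one of the IPI_* bits in the target CPU's
 * ipi_operation word and then raises a hardware IPI via platform_send_ipi();
 * handle_IPI() on the target xchg()s the word back to zero and services every
 * bit that was set, so several pending operations may be handled from a
 * single interrupt.
 */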
extern void cpu_halt (void);

void
lock_ipi_calllock(void)
{
        spin_lock_irq(&call_lock);
}

void
unlock_ipi_calllock(void)
{
        spin_unlock_irq(&call_lock);
}
        cpu_clear(smp_processor_id(), cpu_online_map);

        /* Should never be here */
handle_IPI (int irq, void *dev_id)
        int this_cpu = get_cpu();
        unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);

        mb();   /* Order interrupt and bit testing. */
        while ((ops = xchg(pending_ipis, 0)) != 0) {
                mb();   /* Order bit clearing and data access. */
                        ops &= ~(1 << which);

                                struct call_data_struct *data;
                                void (*func)(void *info);

                                /* release the 'pointer lock' */
                                data = (struct call_data_struct *) call_data;

                                atomic_inc(&data->started);
                                /*
                                 * At this point the structure may be gone unless
                                 * wait is true.
                                 */

                                /* Notify the sending CPU that the task is done. */
                                atomic_inc(&data->finished);

                        case IPI_KDUMP_CPU_STOP:
                                unw_init_running(kdump_cpu_freeze, NULL);

                                printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);

                mb();   /* Order data access and bit testing. */
/* Called with preemption disabled. */
static inline void
send_IPI_single (int dest_cpu, int op)
{
        set_bit(op, &per_cpu(ipi_operation, dest_cpu));
        platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}
/* Called with preemption disabled. */
static inline void
send_IPI_allbutself (int op)
{
        unsigned int i;

        for_each_online_cpu(i) {
                if (i != smp_processor_id())
                        send_IPI_single(i, op);
        }
}
/* Called with preemption disabled. */
static inline void
send_IPI_mask(cpumask_t mask, int op)
{
        unsigned int cpu;

        for_each_cpu_mask(cpu, mask) {
                send_IPI_single(cpu, op);
        }
}
/* Called with preemption disabled. */
static inline void
send_IPI_all (int op)
{
        int i;

        for_each_online_cpu(i) {
                send_IPI_single(i, op);
        }
}
/* Called with preemption disabled. */
static inline void
send_IPI_self (int op)
{
        send_IPI_single(smp_processor_id(), op);
}
void
kdump_smp_send_stop(void)
{
        send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
}

void
kdump_smp_send_init(void)
{
        unsigned int cpu, self_cpu;

        self_cpu = smp_processor_id();
        for_each_online_cpu(cpu) {
                if (cpu != self_cpu) {
                        if (kdump_status[cpu] == 0)
                                platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
                }
        }
}
/* Called with preemption disabled. */
void
smp_send_reschedule (int cpu)
{
        platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
/* Called with preemption disabled. */
void
smp_send_local_flush_tlb (int cpu)
{
        platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
}
void
smp_local_flush_tlb(void)
{
        /*
         * Use atomic ops. Otherwise, the load/increment/store sequence from
         * a "++" operation can have the line stolen between the load & store.
         * The overhead of the atomic op is negligible in this case & offers
         * significant benefit for the brief periods where lots of cpus
         * are simultaneously flushing TLBs.
         */
        ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
        local_flush_tlb_all();
}
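/*
 * Illustrative sketch (not part of the driver logic): the non-atomic
 * alternative the comment above warns about would look like
 *
 *      local_tlb_flush_counts[smp_processor_id()].count++;
 *
 * which compiles to a separate load, add and store; a CPU polling the
 * counter in smp_flush_tlb_cpumask() can steal the cacheline between the
 * load and the store, so the single ia64_fetchadd() used here is cheaper
 * while many CPUs are flushing at once.
 */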
#define FLUSH_DELAY     5       /* Usec backoff to eliminate excessive cacheline bouncing */

smp_flush_tlb_cpumask(cpumask_t xcpumask)
        unsigned int *counts = __ia64_per_cpu_var(shadow_flush_counts);
        cpumask_t cpumask = xcpumask;
        int mycpu, cpu, flush_mycpu = 0;

        mycpu = smp_processor_id();

        for_each_cpu_mask(cpu, cpumask)
                counts[cpu] = local_tlb_flush_counts[cpu].count;

        for_each_cpu_mask(cpu, cpumask) {
                if (cpu == mycpu)
                        flush_mycpu = 1;
                else
                        smp_send_local_flush_tlb(cpu);
        }

        if (flush_mycpu)
                smp_local_flush_tlb();

        for_each_cpu_mask(cpu, cpumask)
                while (counts[cpu] == local_tlb_flush_counts[cpu].count)
                        udelay(FLUSH_DELAY);
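/*
 * Descriptive note on the loops above: the caller snapshots each target's
 * local_tlb_flush_counts[].count into its per-cpu shadow_flush_counts array,
 * sends a flush IPI to every CPU in the mask (flushing directly instead of
 * via IPI for its own CPU), and then spins with a FLUSH_DELAY microsecond
 * backoff until every counter has advanced past its snapshot, i.e. until
 * each target has entered smp_local_flush_tlb().
 */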
void
smp_flush_tlb_all (void)
{
        on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
}
smp_flush_tlb_mm (struct mm_struct *mm)
        /* this happens for the common case of a single-threaded fork(): */
        if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
                local_finish_flush_tlb_mm(mm);

        /*
         * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
         * have been running in the address space.  It's not clear that this is worth the
         * trouble though: to avoid races, we have to raise the IPI on the target CPU
         * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
         * rather trivial.
         */
        on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
/*
 * Run a function on a specific CPU.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <nonatomic> Currently unused.
 *  <wait>      If true, wait until the function has completed on the other CPU.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>,
 * or is executing it, or has already executed it.
 */
smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
                          int wait)
        struct call_data_struct data;
        int me = get_cpu();     /* prevent preemption and reschedule on another processor */

        atomic_set(&data.started, 0);
        atomic_set(&data.finished, 0);

        spin_lock_bh(&call_lock);

        mb();   /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
        send_IPI_single(cpuid, IPI_CALL_FUNC);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();
        while (atomic_read(&data.finished) != cpus)
                cpu_relax();

        spin_unlock_bh(&call_lock);

EXPORT_SYMBOL(smp_call_function_single);
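/*
 * Usage sketch (illustrative only; the callback below is invented for this
 * comment): run a fast, non-blocking function on one specific CPU and wait
 * for it to complete.
 *
 *      static void say_hello(void *unused)
 *      {
 *              printk(KERN_INFO "hello from cpu %d\n", smp_processor_id());
 *      }
 *
 *      // nonatomic is unused; wait = 1 blocks until say_hello() has run on CPU 2
 *      smp_call_function_single(2, say_hello, NULL, 0, 1);
 *
 * The callback runs in interrupt context on the target CPU, so it must not
 * sleep or take sleeping locks.
 */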
/*
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * <mask>       The set of cpus to run on.  Must not include the current cpu.
 * <func>       The function to run. This must be fast and non-blocking.
 * <info>       An arbitrary pointer to pass to the function.
 * <wait>       If true, wait (atomically) until the function
 *              has completed on the other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function_mask(cpumask_t mask,
                           void (*func)(void *), void *info,
                           int wait)
        struct call_data_struct data;
        cpumask_t allbutself;
        int cpus;

        spin_lock(&call_lock);
        allbutself = cpu_online_map;
        cpu_clear(smp_processor_id(), allbutself);

        cpus_and(mask, mask, allbutself);
        cpus = cpus_weight(mask);
        if (!cpus) {
                spin_unlock(&call_lock);
                return 0;
        }

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        atomic_set(&data.started, 0);
        atomic_set(&data.finished, 0);

        mb();   /* ensure store to call_data precedes setting of IPI_CALL_FUNC */

        /* Send a message to other CPUs */
        if (cpus_equal(mask, allbutself))
                send_IPI_allbutself(IPI_CALL_FUNC);
        else
                send_IPI_mask(mask, IPI_CALL_FUNC);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();
        while (atomic_read(&data.finished) != cpus)
                cpu_relax();

        spin_unlock(&call_lock);

EXPORT_SYMBOL(smp_call_function_mask);
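/*
 * Usage sketch (illustrative only; the callback is invented for this
 * comment): run a function on CPUs 1 and 2, never on the calling CPU,
 * and wait for completion.
 *
 *      static void drain_local_queue(void *unused)
 *      {
 *              // fast, non-blocking work only: this runs in interrupt context
 *      }
 *
 *      cpumask_t mask = CPU_MASK_NONE;
 *      cpu_set(1, mask);
 *      cpu_set(2, mask);
 *      if (smp_call_function_mask(mask, drain_local_queue, NULL, 1) < 0)
 *              printk(KERN_WARNING "cross-call failed\n");
 *
 * The mask must not include the current cpu, and the call is not allowed
 * with interrupts disabled or from interrupt context.
 */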
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 *  [SUMMARY]   Run a function on all other CPUs.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <nonatomic> Currently unused.
 *  <wait>      If true, wait (atomically) until the function has completed on the other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until the remote CPUs are nearly ready to execute <func>,
 * or are executing it, or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
        struct call_data_struct data;
        int cpus;

        spin_lock(&call_lock);
        cpus = num_online_cpus() - 1;
        if (!cpus) {
                spin_unlock(&call_lock);
                return 0;
        }

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        atomic_set(&data.started, 0);
        atomic_set(&data.finished, 0);

        mb();   /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
        send_IPI_allbutself(IPI_CALL_FUNC);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();
        while (atomic_read(&data.finished) != cpus)
                cpu_relax();

        spin_unlock(&call_lock);

EXPORT_SYMBOL(smp_call_function);
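/*
 * Usage sketch (illustrative only; the callback is invented for this
 * comment): ask every other online CPU to run a fast handler and wait
 * until all of them have finished.
 *
 *      static void sync_state(void *arg)
 *      {
 *              // runs on each remote CPU in interrupt context
 *      }
 *
 *      smp_call_function(sync_state, NULL, 0, 1);      // nonatomic unused, wait = 1
 *      sync_state(NULL);                               // run it on the local CPU too
 *
 * As with the mask and single-CPU variants, this must not be called with
 * interrupts disabled or from a hardware or software interrupt handler.
 */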
/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */
        send_IPI_allbutself(IPI_CPU_STOP);
setup_profiling_timer (unsigned int multiplier)