+/*
+ * Called with preemption disabled.
+ */
+static void
+smp_send_local_flush_tlb (int cpu)
+{
+ platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
+}
+
+void
+smp_local_flush_tlb(void)
+{
+ /*
+ * Use atomic ops. Otherwise, the load/increment/store sequence from
+ * a "++" operation can have the line stolen between the load & store.
+ * The overhead of the atomic op is negligible in this case & offers
+ * significant benefit for the brief periods where lots of cpus
+ * are simultaneously flushing TLBs.
+ */
+ ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
+ local_flush_tlb_all();
+}
+
+#define FLUSH_DELAY 5 /* Usec backoff to eliminate excessive cacheline bouncing */
+
+void
+smp_flush_tlb_cpumask(cpumask_t xcpumask)
+{
+ unsigned int *counts = __ia64_per_cpu_var(shadow_flush_counts);
+ cpumask_t cpumask = xcpumask;
+ int mycpu, cpu, flush_mycpu = 0;
+
+ preempt_disable();
+ mycpu = smp_processor_id();
+
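+ /*
+ * Snapshot the current flush count of every target cpu so we can
+ * tell below when each one has completed a local TLB flush.
+ */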
+ for_each_cpu_mask(cpu, cpumask)
+ counts[cpu] = local_tlb_flush_counts[cpu].count;
+
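+ /* Make sure the counter snapshot is complete before any IPI goes out. */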
+ mb();
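+ /*
+ * IPI each remote cpu in the mask; the local cpu is flushed directly
+ * below rather than interrupting itself.
+ */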
+ for_each_cpu_mask(cpu, cpumask) {
+ if (cpu == mycpu)
+ flush_mycpu = 1;
+ else
+ smp_send_local_flush_tlb(cpu);
+ }
+
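+ /* Flush the local TLB once the IPIs to the other cpus are in flight. */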
+ if (flush_mycpu)
+ smp_local_flush_tlb();
+
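+ /*
+ * Wait for each target cpu's flush counter to advance past the value
+ * snapshotted above, indicating its local flush has completed. Back
+ * off between polls to limit cacheline bouncing.
+ */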
+ for_each_cpu_mask(cpu, cpumask)
+ while (counts[cpu] == local_tlb_flush_counts[cpu].count)
+ udelay(FLUSH_DELAY);
+
+ preempt_enable();
+}
+