1 /* Copyright (C) 2004 Mips Technologies, Inc */
3 #include <linux/kernel.h>
4 #include <linux/sched.h>
5 #include <linux/cpumask.h>
6 #include <linux/interrupt.h>
7 #include <linux/kernel_stat.h>
8 #include <linux/module.h>
11 #include <asm/processor.h>
12 #include <asm/atomic.h>
13 #include <asm/system.h>
14 #include <asm/hardirq.h>
15 #include <asm/hazards.h>
17 #include <asm/mmu_context.h>
19 #include <asm/mipsregs.h>
20 #include <asm/cacheflush.h>
22 #include <asm/addrspace.h>
24 #include <asm/smtc_ipi.h>
25 #include <asm/smtc_proc.h>
28 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
31 #define MIPS_CPU_IPI_IRQ 1
33 #define LOCK_MT_PRA() \
34 local_irq_save(flags); \
35 mtflags = dmt()
37 #define UNLOCK_MT_PRA() \
38 emt(mtflags); \
39 local_irq_restore(flags)
41 #define LOCK_CORE_PRA() \
42 local_irq_save(flags); \
43 mtflags = dvpe()
45 #define UNLOCK_CORE_PRA() \
46 evpe(mtflags); \
47 local_irq_restore(flags)
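/*
 * Illustrative usage sketch (not part of the original file): these macros
 * expect the caller to declare the variables they use, as the functions
 * later in this file do. Assuming local "flags" and "mtflags":
 *
 *	unsigned long flags;
 *	unsigned int mtflags;
 *
 *	LOCK_MT_PRA();
 *	... manipulate per-TC/per-VPE state via MFTR/MTTR accessors ...
 *	UNLOCK_MT_PRA();
 *
 * LOCK_CORE_PRA()/UNLOCK_CORE_PRA() follow the same pattern, but disable
 * multi-VPE execution (dvpe/evpe) rather than multi-threading (dmt/emt).
 */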
50 * Data structures purely associated with SMTC parallelism
55 * Table for tracking ASIDs whose lifetime is prolonged.
58 asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
61 * Clock interrupt "latch" buffers, per "CPU"
64 unsigned int ipi_timer_latch[NR_CPUS];
67 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
70 #define IPIBUF_PER_CPU 4
72 static struct smtc_ipi_q IPIQ[NR_CPUS];
73 static struct smtc_ipi_q freeIPIq;
76 /* Forward declarations */
78 void ipi_decode(struct smtc_ipi *);
79 static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
80 static void setup_cross_vpe_interrupts(unsigned int nvpe);
81 void init_smtc_stats(void);
83 /* Global SMTC Status */
85 unsigned int smtc_status = 0;
87 /* Boot command line configuration overrides */
89 static int ipibuffers = 0;
90 static int nostlb = 0;
91 static int asidmask = 0;
92 unsigned long smtc_asid_mask = 0xff;
94 static int __init ipibufs(char *str)
96 get_option(&str, &ipibuffers);
100 static int __init stlb_disable(char *s)
106 static int __init asidmask_set(char *str)
108 get_option(&str, &asidmask);
118 smtc_asid_mask = (unsigned long)asidmask;
121 printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
126 __setup("ipibufs=", ipibufs);
127 __setup("nostlb", stlb_disable);
128 __setup("asidmask=", asidmask_set);
130 #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
132 static int hang_trig = 0;
134 static int __init hangtrig_enable(char *s)
141 __setup("hangtrig", hangtrig_enable);
143 #define DEFAULT_BLOCKED_IPI_LIMIT 32
145 static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;
147 static int __init tintq(char *str)
149 get_option(&str, &timerq_limit);
153 __setup("tintq=", tintq);
155 static int imstuckcount[2][8];
156 /* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
157 static int vpemask[2][8] = {
158 {0, 0, 1, 0, 0, 0, 0, 1},
159 {0, 0, 0, 0, 0, 0, 0, 1}
161 int tcnoprog[NR_CPUS];
162 static atomic_t idle_hook_initialized = {0};
163 static int clock_hang_reported[NR_CPUS];
165 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
167 /* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */
169 void __init sanitize_tlb_entries(void)
171 printk("Deprecated sanitize_tlb_entries() invoked\n");
176 * Configure shared TLB - VPC configuration bit must be set by caller
179 static void smtc_configure_tlb(void)
182 unsigned long mvpconf0;
183 unsigned long config1val;
185 /* Set up ASID preservation table */
186 for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) {
187 for(i = 0; i < MAX_SMTC_ASIDS; i++) {
188 smtc_live_asid[vpes][i] = 0;
191 mvpconf0 = read_c0_mvpconf0();
193 if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
194 >> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
195 /* If we have multiple VPEs, try to share the TLB */
196 if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
198 * If TLB sizing is programmable, shared TLB
199 * size is the total available complement.
200 * Otherwise, we have to take the sum of all
201 * static VPE TLB entries.
203 if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
204 >> MVPCONF0_PTLBE_SHIFT)) == 0) {
206 * If there's more than one VPE, there had better
207 * be more than one TC, because we need one to bind
208 * to each VPE in turn to be able to read
209 * its configuration state!
212 /* Stop the TC from doing anything foolish */
213 write_tc_c0_tchalt(TCHALT_H);
215 /* No need to un-Halt - that happens later anyway */
216 for (i=0; i < vpes; i++) {
217 write_tc_c0_tcbind(i);
219 * To be 100% sure we're really getting the right
220 * information, we exit the configuration state
221 * and do an IHB after each rebinding.
224 read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
227 * Only count if the MMU Type indicated is TLB
229 if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
230 config1val = read_vpe_c0_config1();
231 tlbsiz += ((config1val >> 25) & 0x3f) + 1;
234 /* Put core back in configuration state */
236 read_c0_mvpcontrol() | MVPCONTROL_VPC );
240 write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
244 * Setup kernel data structures to use software total,
245 * rather than read the per-VPE Config1 value. The values
246 * for "CPU 0" gets copied to all the other CPUs as part
247 * of their initialization in smtc_cpu_setup().
250 /* MIPS32 limits TLB indices to 64 */
253 cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
254 smtc_status |= SMTC_TLB_SHARED;
255 local_flush_tlb_all();
257 printk("TLB of %d entry pairs shared by %d VPEs\n",
260 printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
267 * Incrementally build the CPU map out of constituent MIPS MT cores,
268 * using the specified available VPEs and TCs. Platform code needs
269 * to ensure that each MIPS MT core invokes this routine on reset,
272 * This version of the build_cpu_map and prepare_cpus routines assumes
273 * that *all* TCs of a MIPS MT core will be used for Linux, and that
274 * they will be spread across *all* available VPEs (to minimise the
275 * loss of efficiency due to exception service serialization).
276 * An improved version would pick up configuration information and
277 * possibly leave some TCs/VPEs as "slave" processors.
279 * Use c0_MVPConf0 to find out how many TCs are available, setting up
280 * phys_cpu_present_map and the logical/physical mappings.
283 int __init mipsmt_build_cpu_map(int start_cpu_slot)
288 * The CPU map isn't actually used for anything at this point,
289 * so it's not clear what else we should do apart from set
290 * everything up so that "logical" = "physical".
292 ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
293 for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
294 cpu_set(i, phys_cpu_present_map);
295 __cpu_number_map[i] = i;
296 __cpu_logical_map[i] = i;
298 /* Initialize map of CPUs with FPUs */
299 cpus_clear(mt_fpu_cpumask);
301 /* One of those TCs is the one booting, and not a secondary... */
302 printk("%i available secondary CPU TC(s)\n", i - 1);
308 * Common setup before any secondaries are started
309 * Make sure all CPUs are in a sensible state before we boot any of the
312 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
313 * as possible across the available VPEs.
316 static void smtc_tc_setup(int vpe, int tc, int cpu)
319 write_tc_c0_tchalt(TCHALT_H);
321 write_tc_c0_tcstatus((read_tc_c0_tcstatus()
322 & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
324 write_tc_c0_tccontext(0);
326 write_tc_c0_tcbind(vpe);
327 /* In general, all TCs should have the same cpu_data indications */
328 memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
329 /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
330 if (cpu_data[0].cputype == CPU_34K)
331 cpu_data[cpu].options &= ~MIPS_CPU_FPU;
332 cpu_data[cpu].vpe_id = vpe;
333 cpu_data[cpu].tc_id = tc;
337 void mipsmt_prepare_cpus(void)
339 int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu;
343 struct smtc_ipi *pipi;
345 /* disable interrupts so we can disable MT */
346 local_irq_save(flags);
347 /* disable MT so we can configure */
351 spin_lock_init(&freeIPIq.lock);
354 * We probably don't have as many VPEs as we do SMP "CPUs",
355 * but it's possible - and in any case we'll never use more!
357 for (i=0; i<NR_CPUS; i++) {
358 IPIQ[i].head = IPIQ[i].tail = NULL;
359 spin_lock_init(&IPIQ[i].lock);
361 ipi_timer_latch[i] = 0;
364 /* cpu_data index starts at zero */
366 cpu_data[cpu].vpe_id = 0;
367 cpu_data[cpu].tc_id = 0;
370 /* Report on boot-time options */
371 mips_mt_set_cpuoptions();
373 printk("Limit of %d VPEs set\n", vpelimit);
375 printk("Limit of %d TCs set\n", tclimit);
377 printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
380 printk("ASID mask value override to 0x%x\n", asidmask);
383 #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
385 printk("Logic Analyser Trigger on suspected TC hang\n");
386 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
388 /* Put MVPEs into 'configuration state' */
389 write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
391 val = read_c0_mvpconf0();
392 nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
393 if (vpelimit > 0 && nvpe > vpelimit)
395 ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
398 if (tclimit > 0 && ntc > tclimit)
400 tcpervpe = ntc / nvpe;
401 slop = ntc % nvpe; /* Residual TCs, < NVPE */
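/*
 * Illustrative arithmetic: with ntc = 5 TCs and nvpe = 2 VPEs,
 * tcpervpe = 2 and slop = 1, so one VPE ends up hosting an extra TC
 * when the residual TCs are handed out in the loop below.
 */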
403 /* Set up shared TLB */
404 smtc_configure_tlb();
406 for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
411 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
414 printk("VPE %d: TC", vpe);
415 for (i = 0; i < tcpervpe; i++) {
417 * TC 0 is bound to VPE 0 at reset,
418 * and is presumably executing this
419 * code. Leave it alone!
422 smtc_tc_setup(vpe, tc, cpu);
430 smtc_tc_setup(vpe, tc, cpu);
439 * Clear any stale software interrupts from VPE's Cause
441 write_vpe_c0_cause(0);
444 * Clear ERL/EXL of VPEs other than 0
445 * and set restricted interrupt enable/mask.
447 write_vpe_c0_status((read_vpe_c0_status()
448 & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
449 | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
452 * set config to be the same as vpe0,
453 * particularly kseg0 coherency alg
455 write_vpe_c0_config(read_c0_config());
456 /* Clear any pending timer interrupt */
457 write_vpe_c0_compare(0);
458 /* Propagate Config7 */
459 write_vpe_c0_config7(read_c0_config7());
460 write_vpe_c0_count(read_c0_count());
462 /* enable multi-threading within VPE */
463 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
465 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
469 * Pull any physically present but unused TCs out of circulation.
471 while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
472 cpu_clear(tc, phys_cpu_present_map);
473 cpu_clear(tc, cpu_present_map);
477 /* release config state */
478 write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
482 /* Set up coprocessor affinity CPU mask(s) */
484 for (tc = 0; tc < ntc; tc++) {
485 if (cpu_data[tc].options & MIPS_CPU_FPU)
486 cpu_set(tc, mt_fpu_cpumask);
489 /* set up ipi interrupts... */
491 /* If we have multiple VPEs running, set up the cross-VPE interrupt */
493 setup_cross_vpe_interrupts(nvpe);
495 /* Set up queue of free IPI "messages". */
496 nipi = NR_CPUS * IPIBUF_PER_CPU;
500 pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
502 panic("kmalloc of IPI message buffers failed\n");
504 printk("IPI buffer pool of %d buffers\n", nipi);
505 for (i = 0; i < nipi; i++) {
506 smtc_ipi_nq(&freeIPIq, pipi);
510 /* Arm multithreading and enable other VPEs - but all TCs are Halted */
513 local_irq_restore(flags);
514 /* Initialize SMTC /proc statistics/diagnostics */
520 * Setup the PC, SP, and GP of a secondary processor and start it
522 * smp_bootstrap is the place to resume from
523 * __KSTK_TOS(idle) is apparently the stack pointer
524 * (unsigned long)task_thread_info(idle) is the gp
527 void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
529 extern u32 kernelsp[NR_CPUS];
534 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
537 settc(cpu_data[cpu].tc_id);
540 write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
543 kernelsp[cpu] = __KSTK_TOS(idle);
544 write_tc_gpr_sp(__KSTK_TOS(idle));
547 write_tc_gpr_gp((unsigned long)task_thread_info(idle));
549 smtc_status |= SMTC_MTC_ACTIVE;
550 write_tc_c0_tchalt(0);
551 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
557 void smtc_init_secondary(void)
560 * Start timer on secondary VPEs if necessary.
561 * plat_timer_setup has already been invoked by init/main
562 * on "boot" TC. Like the per_cpu_trap_init() hack, this assumes that
563 * SMTC init code assigns TCs consecutively and in ascending order
564 * across available VPEs.
566 if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
567 ((read_c0_tcbind() & TCBIND_CURVPE)
568 != cpu_data[smp_processor_id() - 1].vpe_id)){
569 write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ);
575 void smtc_smp_finish(void)
577 printk("TC %d going on-line as CPU %d\n",
578 cpu_data[smp_processor_id()].tc_id, smp_processor_id());
581 void smtc_cpus_done(void)
586 * Support for SMTC-optimized driver IRQ registration
590 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
591 * in do_IRQ. These are passed in setup_irq_smtc() and stored
595 int setup_irq_smtc(unsigned int irq, struct irqaction * new,
596 unsigned long hwmask)
598 #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
599 unsigned int vpe = current_cpu_data.vpe_id;
601 vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
603 irq_hwmask[irq] = hwmask;
605 return setup_irq(irq, new);
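/*
 * Usage sketch (mirrors the cross-VPE IPI registration later in this
 * file): a caller passes a pre-built irqaction plus the Status/Cause IM
 * bit(s) that do_IRQ must manage for that interrupt source, e.g.
 *
 *	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
 *
 * The hwmask value is simply recorded in irq_hwmask[] for do_IRQ's use
 * before the registration is handed on to the generic setup_irq().
 */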
609 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
610 * Within a VPE one TC can interrupt another by different approaches.
611 * The easiest to get right would probably be to make all TCs except
612 * the target IXMT and set a software interrupt, but an IXMT-based
613 * scheme requires that a handler must run before a new IPI could
614 * be sent, which would break the "broadcast" loops in MIPS MT.
615 * A more gonzo approach within a VPE is to halt the TC, extract
616 * its Restart, Status, and a couple of GPRs, and program the Restart
617 * address to emulate an interrupt.
619 * Within a VPE, one can be confident that the target TC isn't in
620 * a critical EXL state when halted, since the write to the Halt
621 * register could not have issued on the writing thread if the
622 * halting thread had EXL set. So k0 and k1 of the target TC
623 * can be used by the injection code. Across VPEs, one can't
624 * be certain that the target TC isn't in a critical exception
625 * state. So we try a two-step process of sending a software
626 * interrupt to the target VPE, which either handles the event
627 * itself (if it was the target) or injects the event within
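/*
 * In outline (a sketch of the logic implemented by smtc_send_ipi() and
 * ipi_interrupt() below):
 *
 *	same VPE, target not IXMT:  halt target TC, post_direct_ipi(), unhalt
 *	same VPE, target IXMT:      queue message on IPIQ[cpu] for later replay
 *	different VPE:              queue message, then raise software
 *	                            interrupt 1 (C_SW1) on the target VPE
 */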
631 static void smtc_ipi_qdump(void)
635 for (i = 0; i < NR_CPUS ;i++) {
636 printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
637 i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
643 * The standard atomic.h primitives don't quite do what we want
644 * here: We need an atomic add-and-return-previous-value (which
645 * could be done with atomic_add_return and a decrement) and an
646 * atomic set/zero-and-return-previous-value (which can't really
647 * be done with the atomic.h primitives). And since this is
648 * MIPS MT, we can assume that we have LL/SC.
650 static __inline__ int atomic_postincrement(unsigned int *pv)
652 unsigned long result;
656 __asm__ __volatile__(
662 : "=&r" (result), "=&r" (temp), "=m" (*pv)
669 void smtc_send_ipi(int cpu, int type, unsigned int action)
672 struct smtc_ipi *pipi;
676 if (cpu == smp_processor_id()) {
677 printk("Cannot Send IPI to self!\n");
680 /* Set up a descriptor, to be delivered either promptly or queued */
681 pipi = smtc_ipi_dq(&freeIPIq);
684 mips_mt_regdump(dvpe());
685 panic("IPI Msg. Buffers Depleted\n");
688 pipi->arg = (void *)action;
690 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
691 /* If not on same VPE, enqueue and send cross-VPE interrupt */
692 smtc_ipi_nq(&IPIQ[cpu], pipi);
694 settc(cpu_data[cpu].tc_id);
695 write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
699 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
700 * since ASID shootdown on the other VPE may
701 * collide with this operation.
704 settc(cpu_data[cpu].tc_id);
705 /* Halt the targeted TC */
706 write_tc_c0_tchalt(TCHALT_H);
710 * Inspect TCStatus - if IXMT is set, we have to queue
711 * a message. Otherwise, we set up the "interrupt"
714 tcstatus = read_tc_c0_tcstatus();
716 if ((tcstatus & TCSTATUS_IXMT) != 0) {
718 * Spin-waiting here can deadlock,
719 * so we queue the message for the target TC.
721 write_tc_c0_tchalt(0);
723 /* Try to reduce redundant timer interrupt messages */
724 if (type == SMTC_CLOCK_TICK) {
725 if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) {
726 smtc_ipi_nq(&freeIPIq, pipi);
730 smtc_ipi_nq(&IPIQ[cpu], pipi);
732 post_direct_ipi(cpu, pipi);
733 write_tc_c0_tchalt(0);
740 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
742 static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
744 struct pt_regs *kstack;
745 unsigned long tcstatus;
746 unsigned long tcrestart;
747 extern u32 kernelsp[NR_CPUS];
748 extern void __smtc_ipi_vector(void);
750 /* Extract Status, EPC from halted TC */
751 tcstatus = read_tc_c0_tcstatus();
752 tcrestart = read_tc_c0_tcrestart();
753 /* If TCRestart indicates a WAIT instruction, advance the PC */
754 if ((tcrestart & 0x80000000)
755 && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
759 * Save on TC's future kernel stack
761 * CU bit of Status is indicator that TC was
762 * already running on a kernel stack...
764 if (tcstatus & ST0_CU0) {
765 /* Note that this "- 1" is pointer arithmetic */
766 kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
768 kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
771 kstack->cp0_epc = (long)tcrestart;
773 kstack->cp0_tcstatus = tcstatus;
774 /* Pass token of operation to be performed in kernel stack pad area */
775 kstack->pad0[4] = (unsigned long)pipi;
776 /* Pass address of function to be called likewise */
777 kstack->pad0[5] = (unsigned long)&ipi_decode;
778 /* Set interrupt exempt and kernel mode */
779 tcstatus |= TCSTATUS_IXMT;
780 tcstatus &= ~TCSTATUS_TKSU;
781 write_tc_c0_tcstatus(tcstatus);
783 /* Set TC Restart address to be SMTC IPI vector */
784 write_tc_c0_tcrestart(__smtc_ipi_vector);
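/*
 * Receiving side (a sketch of the expected flow, based on the stack setup
 * above): when the target TC is restarted at __smtc_ipi_vector, that
 * low-level code is expected to pick up the smtc_ipi pointer and handler
 * address planted in pad0[4]/pad0[5] of the saved frame and call
 * ipi_decode(pipi); the saved Status/EPC values then let the TC resume
 * where it was interrupted.
 */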
787 static void ipi_resched_interrupt(void)
789 /* Return from interrupt should be enough to cause scheduler check */
793 static void ipi_call_interrupt(void)
795 /* Invoke generic function invocation code in smp.c */
796 smp_call_function_interrupt();
799 void ipi_decode(struct smtc_ipi *pipi)
801 void *arg_copy = pipi->arg;
802 int type_copy = pipi->type;
803 int dest_copy = pipi->dest;
805 smtc_ipi_nq(&freeIPIq, pipi);
807 case SMTC_CLOCK_TICK:
809 kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + cp0_compare_irq]++;
810 /* Invoke Clock "Interrupt" */
811 ipi_timer_latch[dest_copy] = 0;
812 #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
813 clock_hang_reported[dest_copy] = 0;
814 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
815 local_timer_interrupt(0, NULL);
819 switch ((int)arg_copy) {
820 case SMP_RESCHEDULE_YOURSELF:
821 ipi_resched_interrupt();
823 case SMP_CALL_FUNCTION:
824 ipi_call_interrupt();
827 printk("Impossible SMTC IPI Argument 0x%x\n",
833 printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
838 void deferred_smtc_ipi(void)
840 struct smtc_ipi *pipi;
843 int q = smp_processor_id();
846 * Test is not atomic, but much faster than a dequeue,
847 * and the vast majority of invocations will have a null queue.
849 if (IPIQ[q].head != NULL) {
850 while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
851 /* ipi_decode() should be called with interrupts off */
852 local_irq_save(flags);
854 local_irq_restore(flags);
860 * Send clock tick to all TCs except the one executing the function
863 void smtc_timer_broadcast(void)
866 int myTC = cpu_data[smp_processor_id()].tc_id;
867 int myVPE = cpu_data[smp_processor_id()].vpe_id;
869 smtc_cpu_stats[smp_processor_id()].timerints++;
871 for_each_online_cpu(cpu) {
872 if (cpu_data[cpu].vpe_id == myVPE &&
873 cpu_data[cpu].tc_id != myTC)
874 smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
879 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
880 * set via cross-VPE MTTR manipulation of the Cause register. It would be
881 * in some regards preferable to have external logic for "doorbell" hardware
885 static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
887 static irqreturn_t ipi_interrupt(int irq, void *dev_id)
889 int my_vpe = cpu_data[smp_processor_id()].vpe_id;
890 int my_tc = cpu_data[smp_processor_id()].tc_id;
892 struct smtc_ipi *pipi;
893 unsigned long tcstatus;
896 unsigned int mtflags;
897 unsigned int vpflags;
900 * So long as cross-VPE interrupts are done via
901 * MFTR/MTTR read-modify-writes of Cause, we need
902 * to stop other VPEs whenever the local VPE does
905 local_irq_save(flags);
907 clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
908 set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
911 local_irq_restore(flags);
914 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
915 * queued for TCs on this VPE other than the current one.
916 * Return-from-interrupt should cause us to drain the queue
917 * for the current TC, so we ought not to have to do it explicitly here.
920 for_each_online_cpu(cpu) {
921 if (cpu_data[cpu].vpe_id != my_vpe)
924 pipi = smtc_ipi_dq(&IPIQ[cpu]);
926 if (cpu_data[cpu].tc_id != my_tc) {
929 settc(cpu_data[cpu].tc_id);
930 write_tc_c0_tchalt(TCHALT_H);
932 tcstatus = read_tc_c0_tcstatus();
933 if ((tcstatus & TCSTATUS_IXMT) == 0) {
934 post_direct_ipi(cpu, pipi);
937 write_tc_c0_tchalt(0);
940 smtc_ipi_req(&IPIQ[cpu], pipi);
944 * ipi_decode() should be called
945 * with interrupts off
947 local_irq_save(flags);
949 local_irq_restore(flags);
957 static void ipi_irq_dispatch(void)
962 static struct irqaction irq_ipi = {
963 .handler = ipi_interrupt,
964 .flags = IRQF_DISABLED,
969 static void setup_cross_vpe_interrupts(unsigned int nvpe)
975 panic("SMTC Kernel requires Vectored Interupt support");
977 set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
979 setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
981 set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
985 * SMTC-specific hacks invoked from elsewhere in the kernel.
987 * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
988 * called with interrupts disabled. We do rely on interrupts being disabled
989 * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
990 * result in a recursive call to raw_local_irq_restore().
993 static void __smtc_ipi_replay(void)
995 unsigned int cpu = smp_processor_id();
998 * To the extent that we've ever turned interrupts off,
999 * we may have accumulated deferred IPIs. This is subtle.
1000 * If we use the smtc_ipi_qdepth() macro, we'll get an
1001 * exact number - but we'll also disable interrupts
1002 * and create a window of failure where a new IPI gets
1003 * queued after we test the depth but before we re-enable
1004 * interrupts. So long as IXMT never gets set, however,
1005 * we should be OK: If we pick up something and dispatch
1006 * it here, that's great. If we see nothing, but concurrent
1007 * with this operation, another TC sends us an IPI, IXMT
1008 * is clear, and we'll handle it as a real pseudo-interrupt
1009 * and not a pseudo-pseudo interrupt.
1011 if (IPIQ[cpu].depth > 0) {
1013 struct smtc_ipi_q *q = &IPIQ[cpu];
1014 struct smtc_ipi *pipi;
1015 extern void self_ipi(struct smtc_ipi *);
1017 spin_lock(&q->lock);
1018 pipi = __smtc_ipi_dq(q);
1019 spin_unlock(&q->lock);
1024 smtc_cpu_stats[cpu].selfipis++;
1029 void smtc_ipi_replay(void)
1031 raw_local_irq_disable();
1032 __smtc_ipi_replay();
1035 EXPORT_SYMBOL(smtc_ipi_replay);
1037 void smtc_idle_loop_hook(void)
1039 #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
1048 * printk within DMT-protected regions can deadlock,
1049 * so buffer diagnostic messages for later output.
1052 char id_ho_db_msg[768]; /* worst-case use should be less than 700 */
1054 if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
1055 if (atomic_add_return(1, &idle_hook_initialized) == 1) {
1057 /* Tedious stuff to just do once */
1058 mvpconf0 = read_c0_mvpconf0();
1059 hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
1060 if (hook_ntcs > NR_CPUS)
1061 hook_ntcs = NR_CPUS;
1062 for (tc = 0; tc < hook_ntcs; tc++) {
1064 clock_hang_reported[tc] = 0;
1066 for (vpe = 0; vpe < 2; vpe++)
1067 for (im = 0; im < 8; im++)
1068 imstuckcount[vpe][im] = 0;
1069 printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
1070 atomic_set(&idle_hook_initialized, 1000);
1072 /* Someone else is initializing in parallel - let 'em finish */
1073 while (atomic_read(&idle_hook_initialized) < 1000)
1078 /* Have we stupidly left IXMT set somewhere? */
1079 if (read_c0_tcstatus() & 0x400) {
1080 write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
1082 printk("Dangling IXMT in cpu_idle()\n");
1085 /* Have we stupidly left an IM bit turned off? */
1086 #define IM_LIMIT 2000
1087 local_irq_save(flags);
1089 pdb_msg = &id_ho_db_msg[0];
1090 im = read_c0_status();
1091 vpe = current_cpu_data.vpe_id;
1092 for (bit = 0; bit < 8; bit++) {
1094 * In current prototype, I/O interrupts
1095 * are masked for VPE > 0
1097 if (vpemask[vpe][bit]) {
1098 if (!(im & (0x100 << bit)))
1099 imstuckcount[vpe][bit]++;
1101 imstuckcount[vpe][bit] = 0;
1102 if (imstuckcount[vpe][bit] > IM_LIMIT) {
1103 set_c0_status(0x100 << bit);
1105 imstuckcount[vpe][bit] = 0;
1106 pdb_msg += sprintf(pdb_msg,
1107 "Dangling IM %d fixed for VPE %d\n", bit,
1114 * Now that we limit outstanding timer IPIs, check for hung TC
1116 for (tc = 0; tc < NR_CPUS; tc++) {
1117 /* Don't check ourself - we'll dequeue IPIs just below */
1118 if ((tc != smp_processor_id()) &&
1119 ipi_timer_latch[tc] > timerq_limit) {
1120 if (clock_hang_reported[tc] == 0) {
1121 pdb_msg += sprintf(pdb_msg,
1122 "TC %d looks hung with timer latch at %d\n",
1123 tc, ipi_timer_latch[tc]);
1124 clock_hang_reported[tc]++;
1129 local_irq_restore(flags);
1130 if (pdb_msg != &id_ho_db_msg[0])
1131 printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
1132 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
1135 * Replay any accumulated deferred IPIs. If "Instant Replay"
1136 * is in use, there should never be any.
1138 #ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
1140 unsigned long flags;
1142 local_irq_save(flags);
1143 __smtc_ipi_replay();
1144 local_irq_restore(flags);
1146 #endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
1149 void smtc_soft_dump(void)
1153 printk("Counter Interrupts taken per CPU (TC)\n");
1154 for (i=0; i < NR_CPUS; i++) {
1155 printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
1157 printk("Self-IPI invocations:\n");
1158 for (i=0; i < NR_CPUS; i++) {
1159 printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
1162 printk("Timer IPI Backlogs:\n");
1163 for (i=0; i < NR_CPUS; i++) {
1164 printk("%d: %d\n", i, ipi_timer_latch[i]);
1166 printk("%d Recoveries of \"stolen\" FPU\n",
1167 atomic_read(&smtc_fpu_recoveries));
1172 * TLB management routines special to SMTC
1175 void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1177 unsigned long flags, mtflags, tcstat, prevhalt, asid;
1181 * It would be nice to be able to use a spinlock here,
1182 * but this is invoked from within TLB flush routines
1183 * that protect themselves with DVPE, so if a lock is
1184 * held by another TC, it'll never be freed.
1186 * DVPE/DMT must not be done with interrupts enabled,
1187 * so even though most callers will already have disabled
1188 * them, let's be really careful...
1191 local_irq_save(flags);
1192 if (smtc_status & SMTC_TLB_SHARED) {
1197 tlb = cpu_data[cpu].vpe_id;
1199 asid = asid_cache(cpu);
1202 if (!((asid += ASID_INC) & ASID_MASK) ) {
1203 if (cpu_has_vtag_icache)
1205 /* Traverse all online CPUs (hack requires contiguous range) */
1206 for (i = 0; i < num_online_cpus(); i++) {
1208 * We don't need to worry about our own CPU, nor those of
1209 * CPUs who don't share our TLB.
1211 if ((i != smp_processor_id()) &&
1212 ((smtc_status & SMTC_TLB_SHARED) ||
1213 (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
1214 settc(cpu_data[i].tc_id);
1215 prevhalt = read_tc_c0_tchalt() & TCHALT_H;
1217 write_tc_c0_tchalt(TCHALT_H);
1220 tcstat = read_tc_c0_tcstatus();
1221 smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
1223 write_tc_c0_tchalt(0);
1226 if (!asid) /* fix version if needed */
1227 asid = ASID_FIRST_VERSION;
1228 local_flush_tlb_all(); /* start new asid cycle */
1230 } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
1233 * SMTC shares the TLB within VPEs and possibly across all VPEs.
1235 for (i = 0; i < num_online_cpus(); i++) {
1236 if ((smtc_status & SMTC_TLB_SHARED) ||
1237 (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
1238 cpu_context(i, mm) = asid_cache(i) = asid;
1241 if (smtc_status & SMTC_TLB_SHARED)
1245 local_irq_restore(flags);
1249 * Invoked from macros defined in mmu_context.h
1250 * which must already have disabled interrupts
1251 * and done a DVPE or DMT as appropriate.
1254 void smtc_flush_tlb_asid(unsigned long asid)
1259 entry = read_c0_wired();
1261 /* Traverse all non-wired entries */
1262 while (entry < current_cpu_data.tlbsize) {
1263 write_c0_index(entry);
1267 ehi = read_c0_entryhi();
1268 if ((ehi & ASID_MASK) == asid) {
1270 * Invalidate only entries with specified ASID,
1271 * making sure all entries differ.
1273 write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
1274 write_c0_entrylo0(0);
1275 write_c0_entrylo1(0);
1277 tlb_write_indexed();
1281 write_c0_index(PARKED_INDEX);
1286 * Support for single-threading cache flush operations.
1289 static int halt_state_save[NR_CPUS];
1292 * To really, really be sure that nothing is being done
1293 * by other TCs, halt them all. This code assumes that
1294 * a DVPE has already been done, so while their Halted
1295 * state is theoretically architecturally unstable, in
1296 * practice, it's not going to change while we're looking
1300 void smtc_cflush_lockdown(void)
1304 for_each_online_cpu(cpu) {
1305 if (cpu != smp_processor_id()) {
1306 settc(cpu_data[cpu].tc_id);
1307 halt_state_save[cpu] = read_tc_c0_tchalt();
1308 write_tc_c0_tchalt(TCHALT_H);
1314 /* It would be cheating to change the cpu_online states during a flush! */
1316 void smtc_cflush_release(void)
1321 * Start with a hazard barrier to ensure
1322 * that all CACHE ops have played through.
1326 for_each_online_cpu(cpu) {
1327 if (cpu != smp_processor_id()) {
1328 settc(cpu_data[cpu].tc_id);
1329 write_tc_c0_tchalt(halt_state_save[cpu]);
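/*
 * Usage sketch (illustrative, following the assumptions stated above):
 * the caller is expected to single-thread the core before flushing, e.g.
 *
 *	vpflags = dvpe();
 *	smtc_cflush_lockdown();
 *	... CACHE operations ...
 *	smtc_cflush_release();
 *	evpe(vpflags);
 *
 * so that no other TC can touch the caches while the flush is in flight.
 */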