/* Copyright (C) 2004 Mips Technologies, Inc */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/smtc_ipi.h>
#include <asm/smtc_proc.h>
/*
 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
 */
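/*
 * MIPS MT "processor affinity" bracketing macros: the dmt()/emt() pair
 * disables and restores multithreading on the local VPE, while the
 * dvpe()/evpe() pair does the same for the whole core. Both are used
 * with interrupts off, and callers must declare the "flags" and
 * "mtflags" locals that these macros assume.
 */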
#define LOCK_MT_PRA() \
	local_irq_save(flags); \
	mtflags = dmt()

#define UNLOCK_MT_PRA() \
	emt(mtflags); \
	local_irq_restore(flags)

#define LOCK_CORE_PRA() \
	local_irq_save(flags); \
	mtflags = dvpe()

#define UNLOCK_CORE_PRA() \
	evpe(mtflags); \
	local_irq_restore(flags)
/*
 * Data structures purely associated with SMTC parallelism
 */
/*
 * Table for tracking ASIDs whose lifetime is prolonged.
 */

asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
/*
 * Clock interrupt "latch" buffers, per "CPU"
 */

unsigned int ipi_timer_latch[NR_CPUS];
/*
 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
 */

#define IPIBUF_PER_CPU 4
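/*
 * The free pool is filled in mipsmt_prepare_cpus(): NR_CPUS * IPIBUF_PER_CPU
 * descriptors by default, unless overridden by the "ipibufs=" boot option.
 */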
static struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;
/* Forward declarations */

void ipi_decode(struct smtc_ipi *);
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
static void setup_cross_vpe_interrupts(unsigned int nvpe);
void init_smtc_stats(void);
/* Global SMTC Status */

unsigned int smtc_status = 0;

/* Boot command line configuration overrides */

static int ipibuffers = 0;
static int nostlb = 0;
static int asidmask = 0;
unsigned long smtc_asid_mask = 0xff;
static int __init ipibufs(char *str)
{
	get_option(&str, &ipibuffers);
	return 1;
}
static int __init stlb_disable(char *s)
{
	nostlb = 1;
	return 1;
}
static int __init asidmask_set(char *str)
{
	get_option(&str, &asidmask);
	switch (asidmask) {
	case 0x1:
	case 0x3:
	case 0x7:
	case 0xf:
	case 0x1f:
	case 0x3f:
	case 0x7f:
	case 0xff:
		smtc_asid_mask = (unsigned long)asidmask;
		break;
	default:
		printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
	}
	return 1;
}
__setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);
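/*
 * Example (illustrative values only): booting with
 * "ipibufs=32 nostlb asidmask=0xf" on the kernel command line would
 * enlarge the IPI descriptor pool, inhibit TLB sharing, and restrict
 * SMTC to ASIDs 0-15.
 */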
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG

static int hang_trig = 0;

static int __init hangtrig_enable(char *s)
{
	hang_trig = 1;
	return 1;
}

__setup("hangtrig", hangtrig_enable);
#define DEFAULT_BLOCKED_IPI_LIMIT 32

static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;

static int __init tintq(char *str)
{
	get_option(&str, &timerq_limit);
	return 1;
}

__setup("tintq=", tintq);
static int imstuckcount[2][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
static int vpemask[2][8] = {
	{0, 0, 1, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 0, 0, 1}
};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = {0};
static int clock_hang_reported[NR_CPUS];

#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */

void __init sanitize_tlb_entries(void)
{
	printk("Deprecated sanitize_tlb_entries() invoked\n");
}
/*
 * Configure shared TLB - VPC configuration bit must be set by caller
 */

static void smtc_configure_tlb(void)
{
	int i, tlbsiz, vpes;
	unsigned long mvpconf0;
	unsigned long config1val;

	/* Set up ASID preservation table */
	for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) {
		for (i = 0; i < MAX_SMTC_ASIDS; i++) {
			smtc_live_asid[vpes][i] = 0;
		}
	}
	mvpconf0 = read_c0_mvpconf0();

	if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
			>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
		/* If we have multiple VPEs, try to share the TLB */
		if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
			/*
			 * If TLB sizing is programmable, shared TLB
			 * size is the total available complement.
			 * Otherwise, we have to take the sum of all
			 * static VPE TLB entries.
			 */
			if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
					>> MVPCONF0_PTLBE_SHIFT)) == 0) {
				/*
				 * If there's more than one VPE, there had better
				 * be more than one TC, because we need one to bind
				 * to each VPE in turn to be able to read
				 * its configuration state!
				 */
				settc(1);
				/* Stop the TC from doing anything foolish */
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				/* No need to un-Halt - that happens later anyway */
				for (i = 0; i < vpes; i++) {
					write_tc_c0_tcbind(i);
					/*
					 * To be 100% sure we're really getting the right
					 * information, we exit the configuration state
					 * and do an IHB after each rebinding.
					 */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
					mips_ihb();
					/*
					 * Only count if the MMU Type indicated is TLB
					 */
					if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
						config1val = read_vpe_c0_config1();
						tlbsiz += ((config1val >> 25) & 0x3f) + 1;
					}
					/* Put core back in configuration state */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() | MVPCONTROL_VPC );
					mips_ihb();
				}
			}
			write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
			ehb();

			/*
			 * Setup kernel data structures to use software total,
			 * rather than read the per-VPE Config1 value. The values
			 * for "CPU 0" gets copied to all the other CPUs as part
			 * of their initialization in smtc_cpu_setup().
			 */

			/* MIPS32 limits TLB indices to 64 */
			if (tlbsiz > 64)
				tlbsiz = 64;
			cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
			smtc_status |= SMTC_TLB_SHARED;
			local_flush_tlb_all();

			printk("TLB of %d entry pairs shared by %d VPEs\n",
				tlbsiz, vpes);
		} else {
			printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
		}
	}
}
/*
 * Incrementally build the CPU map out of constituent MIPS MT cores,
 * using the specified available VPEs and TCs. Platform code needs
 * to ensure that each MIPS MT core invokes this routine on reset.
 *
 * This version of the build_cpu_map and prepare_cpus routines assumes
 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 * they will be spread across *all* available VPEs (to minimise the
 * loss of efficiency due to exception service serialization).
 * An improved version would pick up configuration information and
 * possibly leave some TCs/VPEs as "slave" processors.
 *
 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 * phys_cpu_present_map and the logical/physical mappings.
 */
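/*
 * Illustrative example: a core whose MVPConf0.PTC field reads 4 has
 * 5 TCs, so a call with start_cpu_slot == 0 would populate logical
 * CPUs 0-4, CPU 0 being the TC executing the boot sequence.
 */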
int __init mipsmt_build_cpu_map(int start_cpu_slot)
{
	int i, ntcs;

	/*
	 * The CPU map isn't actually used for anything at this point,
	 * so it's not clear what else we should do apart from set
	 * everything up so that "logical" = "physical".
	 */
	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	for (i = start_cpu_slot; i < NR_CPUS && i < ntcs; i++) {
		cpu_set(i, phys_cpu_present_map);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
	/* Initialize map of CPUs with FPUs */
	cpus_clear(mt_fpu_cpumask);

	/* One of those TC's is the one booting, and not a secondary... */
	printk("%i available secondary CPU TC(s)\n", i - 1);

	return i;
}
/*
 * Common setup before any secondaries are started
 * Make sure all CPU's are in a sensible state before we boot any of the
 * secondaries.
 *
 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 * as possible across the available VPEs.
 */
static void smtc_tc_setup(int vpe, int tc, int cpu)
{
	settc(tc);
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();
	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
			| TCSTATUS_A);
	write_tc_c0_tccontext(0);
	/* Bind TC to VPE */
	write_tc_c0_tcbind(vpe);
	/* In general, all TCs should have the same cpu_data indications */
	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
	if (cpu_data[0].cputype == CPU_34K)
		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
	cpu_data[cpu].vpe_id = vpe;
	cpu_data[cpu].tc_id = tc;
}
void mipsmt_prepare_cpus(void)
{
	int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu;
	unsigned long flags;
	unsigned long val;
	int nipi;
	struct smtc_ipi *pipi;

	/* disable interrupts so we can disable MT */
	local_irq_save(flags);
	/* disable MT so we can configure */
	dvpe();
	dmt();

	spin_lock_init(&freeIPIq.lock);

	/*
	 * We probably don't have as many VPEs as we do SMP "CPUs",
	 * but it's possible - and in any case we'll never use more!
	 */
	for (i = 0; i < NR_CPUS; i++) {
		IPIQ[i].head = IPIQ[i].tail = NULL;
		spin_lock_init(&IPIQ[i].lock);
		IPIQ[i].depth = 0;
		ipi_timer_latch[i] = 0;
	}

	/* cpu_data index starts at zero */
	cpu = 0;
	cpu_data[cpu].vpe_id = 0;
	cpu_data[cpu].tc_id = 0;
	cpu++;

	/* Report on boot-time options */
	mips_mt_set_cpuoptions();
	if (vpelimit > 0)
		printk("Limit of %d VPEs set\n", vpelimit);
	if (tclimit > 0)
		printk("Limit of %d TCs set\n", tclimit);
	if (nostlb)
		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
	if (asidmask)
		printk("ASID mask value override to 0x%x\n", asidmask);

#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	if (hang_trig)
		printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/* Put MVPE's into 'configuration state' */
	write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );

	val = read_c0_mvpconf0();
	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	if (vpelimit > 0 && nvpe > vpelimit)
		nvpe = vpelimit;
	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	if (ntc > NR_CPUS)
		ntc = NR_CPUS;
	if (tclimit > 0 && ntc > tclimit)
		ntc = tclimit;
	tcpervpe = ntc / nvpe;
	slop = ntc % nvpe;	/* Residual TCs, < NVPE */
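	/*
	 * For example (illustrative numbers): 5 TCs on 2 VPEs gives
	 * tcpervpe = 2 and slop = 1, so the loop below binds two TCs
	 * to each VPE and parcels the one residual TC out to VPE 0.
	 */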
	/* Set up shared TLB */
	smtc_configure_tlb();
	for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
		/*
		 * Set the MVP bits.
		 */
		settc(tc);
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);

		printk("VPE %d: TC", vpe);
		for (i = 0; i < tcpervpe; i++) {
			/*
			 * TC 0 is bound to VPE 0 at reset,
			 * and is presumably executing this
			 * code. Leave it alone!
			 */
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
		}
		if (slop) {
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
			slop--;
		}
		if (vpe != 0) {
			/*
			 * Clear any stale software interrupts from VPE's Cause
			 */
			write_vpe_c0_cause(0);

			/*
			 * Clear ERL/EXL of VPEs other than 0
			 * and set restricted interrupt enable/mask.
			 */
			write_vpe_c0_status((read_vpe_c0_status()
				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
				| ST0_IE));
			/*
			 * set config to be the same as vpe0,
			 * particularly kseg0 coherency alg
			 */
			write_vpe_c0_config(read_c0_config());
			/* Clear any pending timer interrupt */
			write_vpe_c0_compare(0);
			/* Propagate Config7 */
			write_vpe_c0_config7(read_c0_config7());
			write_vpe_c0_count(read_c0_count());
		}
		/* enable multi-threading within VPE */
		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
		/* enable the VPE */
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
	}

	/*
	 * Pull any physically present but unused TCs out of circulation.
	 */
	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
		cpu_clear(tc, phys_cpu_present_map);
		cpu_clear(tc, cpu_present_map);
		tc++;
	}

	/* release config state */
	write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );

	printk("\n");
	/* Set up coprocessor affinity CPU mask(s) */

	for (tc = 0; tc < ntc; tc++) {
		if (cpu_data[tc].options & MIPS_CPU_FPU)
			cpu_set(tc, mt_fpu_cpumask);
	}

	/* set up ipi interrupts... */

	/* If we have multiple VPEs running, set up the cross-VPE interrupt */

	if (nvpe > 1)
		setup_cross_vpe_interrupts(nvpe);

	/* Set up queue of free IPI "messages". */
	nipi = NR_CPUS * IPIBUF_PER_CPU;
	if (ipibuffers > 0)
		nipi = ipibuffers;

	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
	if (pipi == NULL)
		panic("kmalloc of IPI message buffers failed\n");
	else
		printk("IPI buffer pool of %d buffers\n", nipi);
	for (i = 0; i < nipi; i++) {
		smtc_ipi_nq(&freeIPIq, pipi);
		pipi++;
	}

	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
	emt(EMT_ENABLE);
	evpe(EVPE_ENABLE);
	local_irq_restore(flags);
	/* Initialize SMTC /proc statistics/diagnostics */
	init_smtc_stats();
}
/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info the gp
 *
 */
void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
{
	extern u32 kernelsp[NR_CPUS];
	long flags;
	int mtflags;

	LOCK_MT_PRA();
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		dvpe();
	}
	settc(cpu_data[cpu].tc_id);

	/* pc */
	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

	/* stack pointer */
	kernelsp[cpu] = __KSTK_TOS(idle);
	write_tc_gpr_sp(__KSTK_TOS(idle));

	/* global pointer */
	write_tc_gpr_gp((unsigned long)task_thread_info(idle));

	smtc_status |= SMTC_MTC_ACTIVE;
	write_tc_c0_tchalt(0);
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		evpe(EVPE_ENABLE);
	}
	UNLOCK_MT_PRA();
}
void smtc_init_secondary(void)
{
	/*
	 * Start timer on secondary VPEs if necessary.
	 * plat_timer_setup has already been invoked by init/main
	 * on the "boot" TC. Like the per_cpu_trap_init() hack, this
	 * assumes that SMTC init code assigns TCs consecutively and
	 * in ascending order across available VPEs.
	 */
	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE)
	    != cpu_data[smp_processor_id() - 1].vpe_id)) {
		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
	}

	local_irq_enable();
}
void smtc_smp_finish(void)
{
	printk("TC %d going on-line as CPU %d\n",
		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}

void smtc_cpus_done(void)
{
}
/*
 * Support for SMTC-optimized driver IRQ registration
 */

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in the irq_hwmask[] array.
 */

int setup_irq_smtc(unsigned int irq, struct irqaction * new,
			unsigned long hwmask)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	unsigned int vpe = current_cpu_data.vpe_id;

	vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
#endif
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}
/*
 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 * Within a VPE one TC can interrupt another by different approaches.
 * The easiest to get right would probably be to make all TCs except
 * the target IXMT and set a software interrupt, but an IXMT-based
 * scheme requires that a handler must run before a new IPI could
 * be sent, which would break the "broadcast" loops in MIPS MT.
 * A more gonzo approach within a VPE is to halt the TC, extract
 * its Restart, Status, and a couple of GPRs, and program the Restart
 * address to emulate an interrupt.
 *
 * Within a VPE, one can be confident that the target TC isn't in
 * a critical EXL state when halted, since the write to the Halt
 * register could not have issued on the writing thread if the
 * halting thread had EXL set. So k0 and k1 of the target TC
 * can be used by the injection code. Across VPEs, one can't
 * be certain that the target TC isn't in a critical exception
 * state. So we try a two-step process of sending a software
 * interrupt to the target VPE, which either handles the event
 * itself (if it was the target) or injects the event within
 * the VPE.
 */
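/*
 * In outline, smtc_send_ipi() below implements that policy:
 *
 *	if (target TC is on another VPE)
 *		queue the message and assert SW1 on the target VPE via MTTR;
 *	else if (target TC has IXMT set)
 *		queue the message for the target to replay later;
 *	else
 *		halt the target TC and inject the "interrupt" directly.
 */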
static void smtc_ipi_qdump(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
			IPIQ[i].depth);
	}
}
/*
 * The standard atomic.h primitives don't quite do what we want
 * here: We need an atomic add-and-return-previous-value (which
 * could be done with atomic_add_return and a decrement) and an
 * atomic set/zero-and-return-previous-value (which can't really
 * be done with the atomic.h primitives). And since this is
 * MIPS MT, we can assume that we have LL/SC.
 */
static __inline__ int atomic_postincrement(unsigned int *pv)
{
	unsigned long result;
	unsigned long temp;

	/* Standard LL/SC increment loop, returning the pre-increment value */
	__asm__ __volatile__(
	"1:	ll	%0, %2					\n"
	"	addu	%1, %0, 1				\n"
	"	sc	%1, %2					\n"
	"	beqz	%1, 1b					\n"
	: "=&r" (result), "=&r" (temp), "=m" (*pv)
	: "m" (*pv)
	: "memory");

	return result;
}
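/*
 * smtc_send_ipi() uses atomic_postincrement() to throttle
 * SMTC_CLOCK_TICK messages: only a sender that advances
 * ipi_timer_latch[cpu] from zero actually queues a new message.
 */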
void smtc_send_ipi(int cpu, int type, unsigned int action)
{
	int tcstatus;
	struct smtc_ipi *pipi;
	long flags;
	int mtflags;

	if (cpu == smp_processor_id()) {
		printk("Cannot Send IPI to self!\n");
		return;
	}
	/* Set up a descriptor, to be delivered either promptly or queued */
	pipi = smtc_ipi_dq(&freeIPIq);
	if (pipi == NULL) {
		bust_spinlocks(1);
		mips_mt_regdump(dvpe());
		panic("IPI Msg. Buffers Depleted\n");
	}
	pipi->type = type;
	pipi->arg = (void *)action;
	pipi->dest = cpu;
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
		smtc_ipi_nq(&IPIQ[cpu], pipi);
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
		UNLOCK_CORE_PRA();
	} else {
		/*
		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
		 * since ASID shootdown on the other VPE may
		 * collide with this operation.
		 */
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		/* Halt the targeted TC */
		write_tc_c0_tchalt(TCHALT_H);
		mips_ihb();

		/*
		 * Inspect TCStatus - if IXMT is set, we have to queue
		 * a message. Otherwise, we set up the "interrupt"
		 * of the other TC.
		 */
		tcstatus = read_tc_c0_tcstatus();

		if ((tcstatus & TCSTATUS_IXMT) != 0) {
			/*
			 * Spin-waiting here can deadlock,
			 * so we queue the message for the target TC.
			 */
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
			/* Try to reduce redundant timer interrupt messages */
			if (type == SMTC_CLOCK_TICK) {
				if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) {
					smtc_ipi_nq(&freeIPIq, pipi);
					return;
				}
			}
			smtc_ipi_nq(&IPIQ[cpu], pipi);
		} else {
			post_direct_ipi(cpu, pipi);
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
		}
	}
}
/*
 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 */
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
	struct pt_regs *kstack;
	unsigned long tcstatus;
	unsigned long tcrestart;
	extern u32 kernelsp[NR_CPUS];
	extern void __smtc_ipi_vector(void);

	/* Extract Status, EPC from halted TC */
	tcstatus = read_tc_c0_tcstatus();
	tcrestart = read_tc_c0_tcrestart();
	/* If TCRestart indicates a WAIT instruction, advance the PC */
	if ((tcrestart & 0x80000000)
	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
		tcrestart += 4;
	}
	/*
	 * Save on TC's future kernel stack
	 *
	 * CU bit of Status is indicator that TC was
	 * already running on a kernel stack...
	 */
	if (tcstatus & ST0_CU0) {
		/* Note that this "- 1" is pointer arithmetic */
		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
	} else {
		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
	}

	kstack->cp0_epc = (long)tcrestart;
	/* Save TCStatus */
	kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed kernel stack pad area */
	kstack->pad0[4] = (unsigned long)pipi;
	/* Pass address of function to be called likewise */
	kstack->pad0[5] = (unsigned long)&ipi_decode;
	/* Set interrupt exempt and kernel mode */
	tcstatus |= TCSTATUS_IXMT;
	tcstatus &= ~TCSTATUS_TKSU;
	write_tc_c0_tcstatus(tcstatus);
	ehb();
	/* Set TC Restart address to be SMTC IPI vector */
	write_tc_c0_tcrestart(__smtc_ipi_vector);
}
static void ipi_resched_interrupt(void)
{
	/* Return from interrupt should be enough to cause scheduler check */
}

static void ipi_call_interrupt(void)
{
	/* Invoke generic function invocation code in smp.c */
	smp_call_function_interrupt();
}
void ipi_decode(struct smtc_ipi *pipi)
{
	void *arg_copy = pipi->arg;
	int type_copy = pipi->type;
	int dest_copy = pipi->dest;

	smtc_ipi_nq(&freeIPIq, pipi);
	switch (type_copy) {
	case SMTC_CLOCK_TICK:
		irq_enter();
		kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + cp0_compare_irq]++;
		/* Invoke Clock "Interrupt" */
		ipi_timer_latch[dest_copy] = 0;
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
		clock_hang_reported[dest_copy] = 0;
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
		local_timer_interrupt(0, NULL);
		irq_exit();
		break;
	case LINUX_SMP_IPI:
		switch ((int)arg_copy) {
		case SMP_RESCHEDULE_YOURSELF:
			ipi_resched_interrupt();
			break;
		case SMP_CALL_FUNCTION:
			ipi_call_interrupt();
			break;
		default:
			printk("Impossible SMTC IPI Argument 0x%x\n",
				(int)arg_copy);
			break;
		}
		break;
	default:
		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
		break;
	}
}
void deferred_smtc_ipi(void)
{
	struct smtc_ipi *pipi;
	unsigned long flags;
	int q = smp_processor_id();

	/*
	 * Test is not atomic, but much faster than a dequeue,
	 * and the vast majority of invocations will have a null queue.
	 */
	if (IPIQ[q].head != NULL) {
		while ((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
			/* ipi_decode() should be called with interrupts off */
			local_irq_save(flags);
			ipi_decode(pipi);
			local_irq_restore(flags);
		}
	}
}
/*
 * Send clock tick to all TCs except the one executing the function
 */

void smtc_timer_broadcast(void)
{
	int cpu;
	int myTC = cpu_data[smp_processor_id()].tc_id;
	int myVPE = cpu_data[smp_processor_id()].vpe_id;

	smtc_cpu_stats[smp_processor_id()].timerints++;

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id == myVPE &&
		    cpu_data[cpu].tc_id != myTC)
			smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
	}
}
/*
 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
 * set via cross-VPE MTTR manipulation of the Cause register. It would be
 * in some regards preferable to have external logic for "doorbell" hardware
 * interrupts.
 */

static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
	int my_vpe = cpu_data[smp_processor_id()].vpe_id;
	int my_tc = cpu_data[smp_processor_id()].tc_id;
	int cpu;
	struct smtc_ipi *pipi;
	unsigned long tcstatus;
	int sent;
	long flags;
	unsigned int mtflags;
	unsigned int vpflags;

	/*
	 * So long as cross-VPE interrupts are done via
	 * MFTR/MTTR read-modify-writes of Cause, we need
	 * to stop other VPEs whenever the local VPE does
	 * an MTTR.
	 */
	local_irq_save(flags);
	vpflags = dvpe();
	clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
	set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
	irq_enable_hazard();
	evpe(vpflags);
	local_irq_restore(flags);

	/*
	 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
	 * queued for TCs on this VPE other than the current one.
	 * Return-from-interrupt should cause us to drain the queue
	 * for the current TC, so we ought not to have to do it explicitly here.
	 */

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id != my_vpe)
			continue;

		pipi = smtc_ipi_dq(&IPIQ[cpu]);
		if (pipi != NULL) {
			if (cpu_data[cpu].tc_id != my_tc) {
				sent = 0;
				LOCK_MT_PRA();
				settc(cpu_data[cpu].tc_id);
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				tcstatus = read_tc_c0_tcstatus();
				if ((tcstatus & TCSTATUS_IXMT) == 0) {
					post_direct_ipi(cpu, pipi);
					sent = 1;
				}
				write_tc_c0_tchalt(0);
				UNLOCK_MT_PRA();
				if (!sent) {
					smtc_ipi_req(&IPIQ[cpu], pipi);
				}
			} else {
				/*
				 * ipi_decode() should be called
				 * with interrupts off
				 */
				local_irq_save(flags);
				ipi_decode(pipi);
				local_irq_restore(flags);
			}
		}
	}

	return IRQ_HANDLED;
}
static void ipi_irq_dispatch(void)
{
	do_IRQ(cpu_ipi_irq);
}

static struct irqaction irq_ipi = {
	.handler	= ipi_interrupt,
	.flags		= IRQF_DISABLED,
	.name		= "SMTC_IPI"
};

static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
	if (nvpe < 1)
		return;

	if (!cpu_has_vint)
		panic("SMTC Kernel requires Vectored Interrupt support");

	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);

	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));

	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}
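/*
 * Once these handlers are installed, the C_SW1 asserted by
 * smtc_send_ipi() on a remote VPE vectors through ipi_irq_dispatch()
 * to ipi_interrupt(), assuming MIPS_CPU_IPI_IRQ names the SW1 line
 * as in the prototype configuration.
 */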
/*
 * SMTC-specific hacks invoked from elsewhere in the kernel.
 *
 * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
 * called with interrupts disabled. We do rely on interrupts being disabled
 * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
 * result in a recursive call to raw_local_irq_restore().
 */
static void __smtc_ipi_replay(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * To the extent that we've ever turned interrupts off,
	 * we may have accumulated deferred IPIs. This is subtle.
	 * If we use the smtc_ipi_qdepth() macro, we'll get an
	 * exact number - but we'll also disable interrupts
	 * and create a window of failure where a new IPI gets
	 * queued after we test the depth but before we re-enable
	 * interrupts. So long as IXMT never gets set, however,
	 * we should be OK: If we pick up something and dispatch
	 * it here, that's great. If we see nothing, but concurrent
	 * with this operation, another TC sends us an IPI, IXMT
	 * is clear, and we'll handle it as a real pseudo-interrupt
	 * and not a pseudo-pseudo interrupt.
	 */
	if (IPIQ[cpu].depth > 0) {
		while (1) {
			struct smtc_ipi_q *q = &IPIQ[cpu];
			struct smtc_ipi *pipi;
			extern void self_ipi(struct smtc_ipi *);

			spin_lock(&q->lock);
			pipi = __smtc_ipi_dq(q);
			spin_unlock(&q->lock);
			if (!pipi)
				break;

			self_ipi(pipi);
			smtc_cpu_stats[cpu].selfipis++;
		}
	}
}

void smtc_ipi_replay(void)
{
	raw_local_irq_disable();
	__smtc_ipi_replay();
}

EXPORT_SYMBOL(smtc_ipi_replay);
void smtc_idle_loop_hook(void)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	int im;
	int flags;
	int mtflags;
	int bit;
	int vpe;
	int tc;
	int hook_ntcs;
	int mvpconf0;
	/*
	 * printk within DMT-protected regions can deadlock,
	 * so buffer diagnostic messages for later output.
	 */
	char *pdb_msg;
	char id_ho_db_msg[768]; /* worst-case use should be less than 700 */

	if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
		if (atomic_add_return(1, &idle_hook_initialized) == 1) {
			/* Tedious stuff to just do once */
			mvpconf0 = read_c0_mvpconf0();
			hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
			if (hook_ntcs > NR_CPUS)
				hook_ntcs = NR_CPUS;
			for (tc = 0; tc < hook_ntcs; tc++) {
				tcnoprog[tc] = 0;
				clock_hang_reported[tc] = 0;
			}
			for (vpe = 0; vpe < 2; vpe++)
				for (im = 0; im < 8; im++)
					imstuckcount[vpe][im] = 0;
			printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
			atomic_set(&idle_hook_initialized, 1000);
		} else {
			/* Someone else is initializing in parallel - let 'em finish */
			while (atomic_read(&idle_hook_initialized) < 1000)
				;
		}
	}

	/* Have we stupidly left IXMT set somewhere? */
	if (read_c0_tcstatus() & 0x400) {
		write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
		ehb();
		printk("Dangling IXMT in cpu_idle()\n");
	}

	/* Have we stupidly left an IM bit turned off? */
#define IM_LIMIT 2000
	local_irq_save(flags);
	mtflags = dmt();
	pdb_msg = &id_ho_db_msg[0];
	im = read_c0_status();
	vpe = current_cpu_data.vpe_id;
	for (bit = 0; bit < 8; bit++) {
		/*
		 * In current prototype, I/O interrupts
		 * are masked for VPE > 0
		 */
		if (vpemask[vpe][bit]) {
			if (!(im & (0x100 << bit)))
				imstuckcount[vpe][bit]++;
			else
				imstuckcount[vpe][bit] = 0;
			if (imstuckcount[vpe][bit] > IM_LIMIT) {
				set_c0_status(0x100 << bit);
				ehb();
				imstuckcount[vpe][bit] = 0;
				pdb_msg += sprintf(pdb_msg,
					"Dangling IM %d fixed for VPE %d\n", bit,
					vpe);
			}
		}
	}

	/*
	 * Now that we limit outstanding timer IPIs, check for hung TC
	 */
	for (tc = 0; tc < NR_CPUS; tc++) {
		/* Don't check ourself - we'll dequeue IPIs just below */
		if ((tc != smp_processor_id()) &&
		    ipi_timer_latch[tc] > timerq_limit) {
			if (clock_hang_reported[tc] == 0) {
				pdb_msg += sprintf(pdb_msg,
					"TC %d looks hung with timer latch at %d\n",
					tc, ipi_timer_latch[tc]);
				clock_hang_reported[tc]++;
			}
		}
	}
	emt(mtflags);
	local_irq_restore(flags);
	if (pdb_msg != &id_ho_db_msg[0])
		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/*
	 * Replay any accumulated deferred IPIs. If "Instant Replay"
	 * is in use, there should never be any.
	 */
#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
	{
		unsigned long flags;

		local_irq_save(flags);
		__smtc_ipi_replay();
		local_irq_restore(flags);
	}
#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
}
void smtc_soft_dump(void)
{
	int i;

	printk("Counter Interrupts taken per CPU (TC)\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
	}
	printk("Self-IPI invocations:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
	}
	smtc_ipi_qdump();
	printk("Timer IPI Backlogs:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %d\n", i, ipi_timer_latch[i]);
	}
	printk("%d Recoveries of \"stolen\" FPU\n",
	       atomic_read(&smtc_fpu_recoveries));
}
/*
 * TLB management routines special to SMTC
 */

void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long flags, mtflags, tcstat, prevhalt, asid;
	int tlb, i;

	/*
	 * It would be nice to be able to use a spinlock here,
	 * but this is invoked from within TLB flush routines
	 * that protect themselves with DVPE, so if a lock is
	 * held by another TC, it'll never be freed.
	 *
	 * DVPE/DMT must not be done with interrupts enabled,
	 * so even so most callers will already have disabled
	 * them, let's be really careful...
	 */

	local_irq_save(flags);
	if (smtc_status & SMTC_TLB_SHARED) {
		mtflags = dvpe();
		tlb = 0;
	} else {
		mtflags = dmt();
		tlb = cpu_data[cpu].vpe_id;
	}
	asid = asid_cache(cpu);

	do {
		if (!((asid += ASID_INC) & ASID_MASK) ) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */
			for (i = 0; i < num_online_cpus(); i++) {
				/*
				 * We don't need to worry about our own CPU, nor those of
				 * CPUs who don't share our TLB.
				 */
				if ((i != smp_processor_id()) &&
				    ((smtc_status & SMTC_TLB_SHARED) ||
				     (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
					settc(cpu_data[i].tc_id);
					prevhalt = read_tc_c0_tchalt() & TCHALT_H;
					if (!prevhalt) {
						write_tc_c0_tchalt(TCHALT_H);
						mips_ihb();
					}
					tcstat = read_tc_c0_tcstatus();
					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
					if (!prevhalt)
						write_tc_c0_tchalt(0);
				}
			}
			if (!asid)		/* fix version if needed */
				asid = ASID_FIRST_VERSION;
			local_flush_tlb_all();	/* start new asid cycle */
		}
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
	 */
	for (i = 0; i < num_online_cpus(); i++) {
		if ((smtc_status & SMTC_TLB_SHARED) ||
		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
			cpu_context(i, mm) = asid_cache(i) = asid;
	}

	if (smtc_status & SMTC_TLB_SHARED)
		evpe(mtflags);
	else
		emt(mtflags);
	local_irq_restore(flags);
}
/*
 * Invoked from macros defined in mmu_context.h
 * which must already have disabled interrupts
 * and done a DVPE or DMT as appropriate.
 */

void smtc_flush_tlb_asid(unsigned long asid)
{
	int entry;
	unsigned long ehi;

	entry = read_c0_wired();

	/* Traverse all non-wired entries */
	while (entry < current_cpu_data.tlbsize) {
		write_c0_index(entry);
		ehb();
		tlb_read();
		ehb();
		ehi = read_c0_entryhi();
		if ((ehi & ASID_MASK) == asid) {
			/*
			 * Invalidate only entries with specified ASID,
			 * making sure all entries differ.
			 */
			write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		entry++;
	}
	write_c0_index(PARKED_INDEX);
	tlbw_use_hazard();
}
/*
 * Support for single-threading cache flush operations.
 */

static int halt_state_save[NR_CPUS];

/*
 * To really, really be sure that nothing is being done
 * by other TCs, halt them all. This code assumes that
 * a DVPE has already been done, so while their Halted
 * state is theoretically architecturally unstable, in
 * practice, it's not going to change while we're looking
 * at it.
 */

void smtc_cflush_lockdown(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			halt_state_save[cpu] = read_tc_c0_tchalt();
			write_tc_c0_tchalt(TCHALT_H);
		}
	}
	mips_ihb();
}

/* It would be cheating to change the cpu_online states during a flush! */

void smtc_cflush_release(void)
{
	int cpu;

	/*
	 * Start with a hazard barrier to ensure
	 * that all CACHE ops have played through.
	 */
	mips_ihb();

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			write_tc_c0_tchalt(halt_state_save[cpu]);
		}
	}
	mips_ihb();
}